From: Vsevolod Stakhov Date: Mon, 1 Jul 2019 14:13:04 +0000 (+0100) Subject: [Project] Remove torch X-Git-Tag: 2.0~684^2~4 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=891b250b452f8e1963a99931f241ac75e34d0281;p=thirdparty%2Frspamd.git [Project] Remove torch --- diff --git a/CMakeLists.txt b/CMakeLists.txt index 00822aecba..2f455d9414 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -57,7 +57,6 @@ OPTION(ENABLE_JEMALLOC "Build rspamd with jemalloc allocator [default: OFF] OPTION(ENABLE_COVERAGE "Build rspamd with code coverage options [default: OFF]" OFF) OPTION(ENABLE_FULL_DEBUG "Build rspamd with all possible debug [default: OFF]" OFF) OPTION(ENABLE_UTILS "Build rspamd internal utils [default: OFF]" OFF) -OPTION(ENABLE_TORCH "Install torch7 with Rspamd [default: ON]" ON) OPTION(ENABLE_LIBUNWIND "Use libunwind to print crash traces [default: OFF]" OFF) OPTION(ENABLE_LUA_TRACE "Trace all Lua C API invocations [default: OFF]" OFF) @@ -1232,19 +1231,6 @@ IF(ENABLE_CLANG_PLUGIN MATCHES "ON") ADD_SUBDIRECTORY(clang-plugin) ENDIF() -IF(ENABLE_TORCH MATCHES "ON") - IF(WITH_LUAJIT) - ADD_SUBDIRECTORY(contrib/lua-torch/paths) - ADD_SUBDIRECTORY(contrib/lua-torch/torch7) - ADD_SUBDIRECTORY(contrib/lua-torch/nn) - ADD_SUBDIRECTORY(contrib/lua-torch/optim) - ADD_SUBDIRECTORY(contrib/lua-torch/decisiontree) - SET(WITH_TORCH 1) - ELSE() - MESSAGE(FATAL_ERROR "Cannot enable torch without luajit") - ENDIF() -ENDIF() - ADD_SUBDIRECTORY(src) ADD_SUBDIRECTORY(test) ADD_SUBDIRECTORY(utils) @@ -1337,10 +1323,6 @@ INSTALL(FILES "contrib/lua-tableshape/tableshape.lua" DESTINATION ${LUALIBDIR}) INSTALL(FILES "contrib/lua-lupa/lupa.lua" DESTINATION ${LUALIBDIR}) INSTALL(FILES "contrib/lua-lpeg/lpegre.lua" DESTINATION ${LUALIBDIR}) -IF(ENABLE_TORCH MATCHES "ON") - INSTALL(FILES "contrib/lua-moses/moses.lua" DESTINATION ${LUALIBDIR}) -ENDIF() - # systemd unit IF(CMAKE_SYSTEM_NAME STREQUAL "Linux" AND WANT_SYSTEMD_UNITS MATCHES "ON") INSTALL(FILES "rspamd.service" DESTINATION ${SYSTEMDDIR}) diff --git a/contrib/lua-moses/LICENSE b/contrib/lua-moses/LICENSE deleted file mode 100644 index f06dce3e8f..0000000000 --- a/contrib/lua-moses/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2012-2014 Roland Yonaba - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
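The file removed next, contrib/lua-moses/moses.lua, is a minified build of the Moses functional-utilities library, which the decisiontree code further below pulls in via local _ = require "moses". Its minified identifiers (bab, dcb, ...) are unreadable, so as a reading aid here is a small plain-Lua sketch of what two of the helpers decisiontree relies on, map and reduce, do. This is an illustrative, simplified reconstruction of their assumed semantics, not the deleted source:

-- Simplified equivalents of two Moses helpers (illustration only).
-- map: apply f(key, value, ...) over a table, collecting one result per key.
local function map(t, f, ...)
   local out = {}
   for k, v in pairs(t) do
      out[k] = f(k, v, ...)
   end
   return out
end

-- reduce: left-fold the values of a table into an accumulator.
local function reduce(t, f, acc)
   for _, v in pairs(t) do
      if acc == nil then acc = v else acc = f(acc, v) end
   end
   return acc
end

assert(reduce({1, 2, 3}, function(a, b) return a + b end) == 6)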
diff --git a/contrib/lua-moses/moses.lua b/contrib/lua-moses/moses.lua deleted file mode 100644 index bb67dccad0..0000000000 --- a/contrib/lua-moses/moses.lua +++ /dev/null @@ -1,364 +0,0 @@ -local _ba='1.6.1'local aba,bba,cba,dba=next,type,select,pcall;local _ca,aca=setmetatable,getmetatable -local bca,cca=table.insert,table.sort;local dca,_da=table.remove,table.concat -local ada,bda,cda=math.randomseed,math.random,math.huge;local dda,__b,a_b=math.floor,math.max,math.min;local b_b=rawget -local c_b=table.unpack or unpack;local d_b,_ab=pairs,ipairs;local aab=os.clock;local bab={} -local function cab(dcb,_db)return dcb>_db end;local function dab(dcb,_db)return dcb<_db end -local function _bb(dcb,_db,adb)return(dcb<_db)and _db or -(dcb>adb and adb or dcb)end;local function abb(dcb,_db)return _db and true end -local function bbb(dcb)return not dcb end -local function cbb(dcb)local _db=0;for adb,bdb in d_b(dcb)do _db=_db+1 end;return _db end -local function dbb(dcb,_db,adb,...)local bdb;local cdb=adb or bab.identity;for ddb,__c in d_b(dcb)do -if not bdb then bdb=cdb(__c,...)else -local a_c=cdb(__c,...)bdb=_db(bdb,a_c)and bdb or a_c end end;return bdb end -local function _cb(dcb,_db,adb,bdb)for i=0,#dcb,_db do local cdb=bab.slice(dcb,i+1,i+_db) -if#cdb>0 then while -(#cdb<_db and bdb)do cdb[#cdb+1]=bdb end;adb(cdb)end end end -local function acb(dcb,_db,adb,bdb) -for i=0,#dcb,_db-1 do local cdb=bab.slice(dcb,i+1,i+_db)if -#cdb>0 and i+1 <#dcb then while(#cdb<_db and bdb)do cdb[#cdb+1]=bdb end -adb(cdb)end end end -local function bcb(dcb,_db,adb)if _db==0 then adb(dcb)end -for i=1,_db do dcb[_db],dcb[i]=dcb[i],dcb[_db]bcb(dcb,_db- -1,adb)dcb[_db],dcb[i]=dcb[i],dcb[_db]end end;local ccb=-1 -function bab.clear(dcb)for _db in d_b(dcb)do dcb[_db]=nil end;return dcb end -function bab.each(dcb,_db,...)for adb,bdb in d_b(dcb)do _db(adb,bdb,...)end end -function bab.eachi(dcb,_db,...) 
-local adb=bab.sort(bab.select(bab.keys(dcb),function(bdb,cdb)return bab.isInteger(cdb)end))for bdb,cdb in _ab(adb)do _db(cdb,dcb[cdb],...)end end -function bab.at(dcb,...)local _db={}for adb,bdb in _ab({...})do -if bab.has(dcb,bdb)then _db[#_db+1]=dcb[bdb]end end;return _db end -function bab.count(dcb,_db)if bab.isNil(_db)then return bab.size(dcb)end;local adb=0 -bab.each(dcb,function(bdb,cdb)if -bab.isEqual(cdb,_db)then adb=adb+1 end end)return adb end -function bab.countf(dcb,_db,...)return bab.count(bab.map(dcb,_db,...),true)end -function bab.cycle(dcb,_db)_db=_db or 1;if _db<=0 then return bab.noop end;local adb,bdb;local cdb=0 -while true do -return -function()adb=adb and -aba(dcb,adb)or aba(dcb) -bdb=not bdb and adb or bdb;if _db then cdb=(adb==bdb)and cdb+1 or cdb -if cdb>_db then return end end;return adb,dcb[adb]end end end -function bab.map(dcb,_db,...)local adb={} -for bdb,cdb in d_b(dcb)do local ddb,__c,a_c=bdb,_db(bdb,cdb,...)adb[a_c and __c or ddb]= -a_c or __c end;return adb end;function bab.reduce(dcb,_db,adb) -for bdb,cdb in d_b(dcb)do if adb==nil then adb=cdb else adb=_db(adb,cdb)end end;return adb end;function bab.reduceby(dcb,_db,adb,bdb,...)return -bab.reduce(bab.select(dcb,bdb,...),_db,adb)end;function bab.reduceRight(dcb,_db,adb)return -bab.reduce(bab.reverse(dcb),_db,adb)end -function bab.mapReduce(dcb,_db,adb) -local bdb={}for cdb,ddb in d_b(dcb)do bdb[cdb]=not adb and ddb or _db(adb,ddb) -adb=bdb[cdb]end;return bdb end;function bab.mapReduceRight(dcb,_db,adb) -return bab.mapReduce(bab.reverse(dcb),_db,adb)end -function bab.include(dcb,_db)local adb= -bab.isFunction(_db)and _db or bab.isEqual;for bdb,cdb in d_b(dcb)do if adb(cdb,_db)then -return true end end;return false end -function bab.detect(dcb,_db) -local adb=bab.isFunction(_db)and _db or bab.isEqual;for bdb,cdb in d_b(dcb)do if adb(cdb,_db)then return bdb end end end -function bab.where(dcb,_db) -local adb=bab.select(dcb,function(bdb,cdb) -for ddb in d_b(_db)do if cdb[ddb]~=_db[ddb]then return false end end;return true end)return#adb>0 and adb or nil end -function bab.findWhere(dcb,_db) -local adb=bab.detect(dcb,function(bdb)for cdb in d_b(_db)do -if _db[cdb]~=bdb[cdb]then return false end end;return true end)return adb and dcb[adb]end -function bab.select(dcb,_db,...)local adb={}for bdb,cdb in d_b(dcb)do -if _db(bdb,cdb,...)then adb[#adb+1]=cdb end end;return adb end -function bab.reject(dcb,_db,...)local adb=bab.map(dcb,_db,...)local bdb={}for cdb,ddb in d_b(adb)do if not ddb then -bdb[#bdb+1]=dcb[cdb]end end;return bdb end -function bab.all(dcb,_db,...)return( (#bab.select(bab.map(dcb,_db,...),abb))== -cbb(dcb))end -function bab.invoke(dcb,_db,...)local adb={...} -return -bab.map(dcb,function(bdb,cdb) -if bab.isTable(cdb)then -if bab.has(cdb,_db)then -if -bab.isCallable(cdb[_db])then return cdb[_db](cdb,c_b(adb))else return cdb[_db]end else -if bab.isCallable(_db)then return _db(cdb,c_b(adb))end end elseif bab.isCallable(_db)then return _db(cdb,c_b(adb))end end)end -function bab.pluck(dcb,_db)return -bab.reject(bab.map(dcb,function(adb,bdb)return bdb[_db]end),bbb)end;function bab.max(dcb,_db,...)return dbb(dcb,cab,_db,...)end;function bab.min(dcb,_db,...)return -dbb(dcb,dab,_db,...)end -function bab.shuffle(dcb,_db)if _db then ada(_db)end -local adb={} -bab.each(dcb,function(bdb,cdb)local ddb=dda(bda()*bdb)+1;adb[bdb]=adb[ddb] -adb[ddb]=cdb end)return adb end -function bab.same(dcb,_db) -return -bab.all(dcb,function(adb,bdb)return bab.include(_db,bdb)end)and -bab.all(_db,function(adb,bdb)return 
bab.include(dcb,bdb)end)end;function bab.sort(dcb,_db)cca(dcb,_db)return dcb end -function bab.sortBy(dcb,_db,adb) -local bdb=_db or bab.identity -if bab.isString(_db)then bdb=function(ddb)return ddb[_db]end end;adb=adb or dab;local cdb={} -bab.each(dcb,function(ddb,__c) -cdb[#cdb+1]={value=__c,transform=bdb(__c)}end) -cca(cdb,function(ddb,__c)return adb(ddb.transform,__c.transform)end)return bab.pluck(cdb,'value')end -function bab.groupBy(dcb,_db,...)local adb={...}local bdb={} -bab.each(dcb,function(cdb,ddb)local __c=_db(cdb,ddb,c_b(adb)) -if -bdb[__c]then bdb[__c][#bdb[__c]+1]=ddb else bdb[__c]={ddb}end end)return bdb end -function bab.countBy(dcb,_db,...)local adb={...}local bdb={} -bab.each(dcb,function(cdb,ddb)local __c=_db(cdb,ddb,c_b(adb))bdb[__c]=( -bdb[__c]or 0)+1 end)return bdb end -function bab.size(...)local dcb={...}local _db=dcb[1]if bab.isTable(_db)then return cbb(dcb[1])else -return cbb(dcb)end end;function bab.containsKeys(dcb,_db) -for adb in d_b(_db)do if not dcb[adb]then return false end end;return true end -function bab.sameKeys(dcb,_db)for adb in -d_b(dcb)do if not _db[adb]then return false end end;for adb in -d_b(_db)do if not dcb[adb]then return false end end -return true end -function bab.sample(dcb,_db,adb)_db=_db or 1;if _db<1 then return end;if _db==1 then if adb then ada(adb)end;return -dcb[bda(1,#dcb)]end;return -bab.slice(bab.shuffle(dcb,adb),1,_db)end -function bab.sampleProb(dcb,_db,adb)if adb then ada(adb)end;return -bab.select(dcb,function(bdb,cdb)return bda()<_db end)end;function bab.toArray(...)return{...}end -function bab.find(dcb,_db,adb)for i=adb or 1,#dcb do if -bab.isEqual(dcb[i],_db)then return i end end end -function bab.reverse(dcb)local _db={}for i=#dcb,1,-1 do _db[#_db+1]=dcb[i]end;return _db end;function bab.fill(dcb,_db,adb,bdb)bdb=bdb or bab.size(dcb) -for i=adb or 1,bdb do dcb[i]=_db end;return dcb end -function bab.selectWhile(dcb,_db,...) -local adb={} -for bdb,cdb in _ab(dcb)do if _db(bdb,cdb,...)then adb[bdb]=cdb else break end end;return adb end -function bab.dropWhile(dcb,_db,...)local adb -for bdb,cdb in _ab(dcb)do if not _db(bdb,cdb,...)then adb=bdb;break end end;if bab.isNil(adb)then return{}end;return bab.rest(dcb,adb)end -function bab.sortedIndex(dcb,_db,adb,bdb)local cdb=adb or dab;if bdb then bab.sort(dcb,cdb)end;for i=1,#dcb do if not -cdb(dcb[i],_db)then return i end end -return#dcb+1 end -function bab.indexOf(dcb,_db)for k=1,#dcb do if dcb[k]==_db then return k end end end -function bab.lastIndexOf(dcb,_db)local adb=bab.indexOf(bab.reverse(dcb),_db)if adb then return -#dcb-adb+1 end end;function bab.findIndex(dcb,_db,...) -for k=1,#dcb do if _db(k,dcb[k],...)then return k end end end -function bab.findLastIndex(dcb,_db,...) -local adb=bab.findIndex(bab.reverse(dcb),_db,...)if adb then return#dcb-adb+1 end end;function bab.addTop(dcb,...) -bab.each({...},function(_db,adb)bca(dcb,1,adb)end)return dcb end;function bab.push(dcb,...)bab.each({...},function(_db,adb) -dcb[#dcb+1]=adb end) -return dcb end -function bab.pop(dcb,_db) -_db=a_b(_db or 1,#dcb)local adb={} -for i=1,_db do local bdb=dcb[1]adb[#adb+1]=bdb;dca(dcb,1)end;return c_b(adb)end -function bab.unshift(dcb,_db)_db=a_b(_db or 1,#dcb)local adb={}for i=1,_db do local bdb=dcb[#dcb] -adb[#adb+1]=bdb;dca(dcb)end;return c_b(adb)end -function bab.pull(dcb,...) 
-for _db,adb in _ab({...})do for i=#dcb,1,-1 do -if bab.isEqual(dcb[i],adb)then dca(dcb,i)end end end;return dcb end -function bab.removeRange(dcb,_db,adb)local bdb=bab.clone(dcb)local cdb,ddb=(aba(bdb)),#bdb -if ddb<1 then return bdb end;_db=_bb(_db or cdb,cdb,ddb) -adb=_bb(adb or ddb,cdb,ddb)if adb<_db then return bdb end;local __c=adb-_db+1;local a_c=_db;while __c>0 do -dca(bdb,a_c)__c=__c-1 end;return bdb end -function bab.chunk(dcb,_db,...)if not bab.isArray(dcb)then return dcb end;local adb,bdb,cdb={},0 -local ddb=bab.map(dcb,_db,...) -bab.each(ddb,function(__c,a_c)cdb=(cdb==nil)and a_c or cdb;bdb=( -(a_c~=cdb)and(bdb+1)or bdb) -if not adb[bdb]then adb[bdb]={dcb[__c]}else adb[bdb][ -#adb[bdb]+1]=dcb[__c]end;cdb=a_c end)return adb end -function bab.slice(dcb,_db,adb)return -bab.select(dcb,function(bdb)return -(bdb>= (_db or aba(dcb))and bdb<= (adb or#dcb))end)end;function bab.first(dcb,_db)local adb=_db or 1 -return bab.slice(dcb,1,a_b(adb,#dcb))end -function bab.initial(dcb,_db) -if _db and _db<0 then return end;return -bab.slice(dcb,1,_db and#dcb- (a_b(_db,#dcb))or#dcb-1)end;function bab.last(dcb,_db)if _db and _db<=0 then return end -return bab.slice(dcb,_db and -#dcb-a_b(_db-1,#dcb-1)or 2,#dcb)end;function bab.rest(dcb,_db)if _db and -_db>#dcb then return{}end -return bab.slice(dcb, -_db and __b(1,a_b(_db,#dcb))or 1,#dcb)end;function bab.nth(dcb,_db) -return dcb[_db]end;function bab.compact(dcb)return -bab.reject(dcb,function(_db,adb)return not adb end)end -function bab.flatten(dcb,_db)local adb= -_db or false;local bdb;local cdb={} -for ddb,__c in d_b(dcb)do -if bab.isTable(__c)then bdb=adb and __c or -bab.flatten(__c) -bab.each(bdb,function(a_c,b_c)cdb[#cdb+1]=b_c end)else cdb[#cdb+1]=__c end end;return cdb end -function bab.difference(dcb,_db)if not _db then return bab.clone(dcb)end;return -bab.select(dcb,function(adb,bdb)return not -bab.include(_db,bdb)end)end -function bab.union(...)return bab.uniq(bab.flatten({...}))end -function bab.intersection(dcb,...)local _db={...}local adb={} -for bdb,cdb in _ab(dcb)do if -bab.all(_db,function(ddb,__c)return bab.include(__c,cdb)end)then bca(adb,cdb)end end;return adb end -function bab.symmetricDifference(dcb,_db)return -bab.difference(bab.union(dcb,_db),bab.intersection(dcb,_db))end -function bab.unique(dcb)local _db={}for i=1,#dcb do if not bab.find(_db,dcb[i])then -_db[#_db+1]=dcb[i]end end;return _db end -function bab.isunique(dcb)return bab.isEqual(dcb,bab.unique(dcb))end -function bab.zip(...)local dcb={...} -local _db=bab.max(bab.map(dcb,function(bdb,cdb)return#cdb end))local adb={}for i=1,_db do adb[i]=bab.pluck(dcb,i)end;return adb end -function bab.append(dcb,_db)local adb={}for bdb,cdb in _ab(dcb)do adb[bdb]=cdb end;for bdb,cdb in _ab(_db)do -adb[#adb+1]=cdb end;return adb end -function bab.interleave(...)return bab.flatten(bab.zip(...))end;function bab.interpose(dcb,_db)return -bab.flatten(bab.zip(_db,bab.rep(dcb,#_db-1)))end -function bab.range(...) 
-local dcb={...}local _db,adb,bdb -if#dcb==0 then return{}elseif#dcb==1 then adb,_db,bdb=dcb[1],0,1 elseif#dcb==2 then -_db,adb,bdb=dcb[1],dcb[2],1 elseif#dcb==3 then _db,adb,bdb=dcb[1],dcb[2],dcb[3]end;if(bdb and bdb==0)then return{}end;local cdb={} -local ddb=__b(dda((adb-_db)/bdb),0)for i=1,ddb do cdb[#cdb+1]=_db+bdb*i end;if#cdb>0 then -bca(cdb,1,_db)end;return cdb end -function bab.rep(dcb,_db)local adb={}for i=1,_db do adb[#adb+1]=dcb end;return adb end;function bab.partition(dcb,_db,adb)if _db<=0 then return end -return coroutine.wrap(function() -_cb(dcb,_db or 1,coroutine.yield,adb)end)end;function bab.sliding(dcb,_db,adb)if -_db<=1 then return end -return coroutine.wrap(function() -acb(dcb,_db or 2,coroutine.yield,adb)end)end -function bab.permutation(dcb)return -coroutine.wrap(function()bcb(dcb, -#dcb,coroutine.yield)end)end;function bab.invert(dcb)local _db={} -bab.each(dcb,function(adb,bdb)_db[bdb]=adb end)return _db end -function bab.concat(dcb,_db,adb,bdb) -local cdb=bab.map(dcb,function(ddb,__c)return -tostring(__c)end)return _da(cdb,_db,adb or 1,bdb or#dcb)end;function bab.noop()return end;function bab.identity(dcb)return dcb end;function bab.constant(dcb)return -function()return dcb end end -function bab.memoize(dcb,_db) -local adb=_ca({},{__mode='kv'})local bdb=_db or bab.identity;return -function(...)local cdb=bdb(...)local ddb=adb[cdb]if not ddb then -adb[cdb]=dcb(...)end;return adb[cdb]end end;function bab.once(dcb)local _db=0;local adb={} -return function(...)_db=_db+1;if _db<=1 then adb={...}end -return dcb(c_b(adb))end end -function bab.before(dcb,_db) -local adb=0;local bdb={}return -function(...)adb=adb+1;if adb<=_db then bdb={...}end;return dcb(c_b(bdb))end end -function bab.after(dcb,_db)local adb,bdb=_db,0;return -function(...)bdb=bdb+1;if bdb>=adb then return dcb(...)end end end -function bab.compose(...)local dcb=bab.reverse{...} -return function(...)local _db,adb=true -for bdb,cdb in _ab(dcb)do if _db then _db=false -adb=cdb(...)else adb=cdb(adb)end end;return adb end end -function bab.pipe(dcb,...)return bab.compose(...)(dcb)end -function bab.complement(dcb)return function(...)return not dcb(...)end end;function bab.juxtapose(dcb,...)local _db={} -bab.each({...},function(adb,bdb)_db[#_db+1]=bdb(dcb)end)return c_b(_db)end -function bab.wrap(dcb,_db)return function(...)return -_db(dcb,...)end end -function bab.times(dcb,_db,...)local adb={}for i=1,dcb do adb[i]=_db(i,...)end;return adb end -function bab.bind(dcb,_db)return function(...)return dcb(_db,...)end end;function bab.bind2(dcb,_db) -return function(adb,...)return dcb(adb,_db,...)end end;function bab.bindn(dcb,...)local _db={...} -return function(...)return -dcb(c_b(bab.append(_db,{...})))end end -function bab.bindAll(dcb,...)local _db={...} -for adb,bdb in -_ab(_db)do local cdb=dcb[bdb]if cdb then dcb[bdb]=bab.bind(cdb,dcb)end end;return dcb end -function bab.uniqueId(dcb,...)ccb=ccb+1 -if dcb then if bab.isString(dcb)then return dcb:format(ccb)elseif -bab.isFunction(dcb)then return dcb(ccb,...)end end;return ccb end -function bab.iterator(dcb,_db)return function()_db=dcb(_db)return _db end end -function bab.array(...)local dcb={}for _db in...do dcb[#dcb+1]=_db end;return dcb end;function bab.flip(dcb)return -function(...)return dcb(c_b(bab.reverse({...})))end end;function bab.over(...) -local dcb={...} -return function(...)local _db={}for adb,bdb in _ab(dcb)do _db[#_db+1]=bdb(...)end -return _db end end;function bab.overEvery(...) -local dcb=bab.over(...) 
-return function(...)return -bab.reduce(dcb(...),function(_db,adb)return _db and adb end)end end;function bab.overSome(...) -local dcb=bab.over(...) -return function(...)return -bab.reduce(dcb(...),function(_db,adb)return _db or adb end)end end -function bab.overArgs(dcb,...) -local _db={...}return -function(...)local adb={...}for i=1,#_db do local bdb=_db[i] -if adb[i]then adb[i]=bdb(adb[i])end end;return dcb(c_b(adb))end end -function bab.partial(dcb,...)local _db={...} -return -function(...)local adb={...}local bdb={}for cdb,ddb in _ab(_db)do bdb[cdb]= -(ddb=='_')and bab.pop(adb)or ddb end;return -dcb(c_b(bab.append(bdb,adb)))end end -function bab.partialRight(dcb,...)local _db={...} -return -function(...)local adb={...}local bdb={} -for k=1,#_db do bdb[k]= -(_db[k]=='_')and bab.pop(adb)or _db[k]end;return dcb(c_b(bab.append(adb,bdb)))end end -function bab.curry(dcb,_db)_db=_db or 2;local adb={} -local function bdb(cdb)if _db==1 then return dcb(cdb)end;if cdb~=nil then -adb[#adb+1]=cdb end;if#adb<_db then return bdb else local ddb={dcb(c_b(adb))}adb={}return -c_b(ddb)end end;return bdb end -function bab.time(dcb,...)local _db=aab()local adb={dcb(...)}return aab()-_db,c_b(adb)end;function bab.keys(dcb)local _db={} -bab.each(dcb,function(adb)_db[#_db+1]=adb end)return _db end;function bab.values(dcb)local _db={} -bab.each(dcb,function(adb,bdb)_db[ -#_db+1]=bdb end)return _db end;function bab.kvpairs(dcb)local _db={} -bab.each(dcb,function(adb,bdb)_db[ -#_db+1]={adb,bdb}end)return _db end -function bab.toObj(dcb)local _db={}for adb,bdb in -_ab(dcb)do _db[bdb[1]]=bdb[2]end;return _db end -function bab.property(dcb)return function(_db)return _db[dcb]end end -function bab.propertyOf(dcb)return function(_db)return dcb[_db]end end;function bab.toBoolean(dcb)return not not dcb end -function bab.extend(dcb,...)local _db={...} -bab.each(_db,function(adb,bdb)if -bab.isTable(bdb)then -bab.each(bdb,function(cdb,ddb)dcb[cdb]=ddb end)end end)return dcb end -function bab.functions(dcb,_db)dcb=dcb or bab;local adb={} -bab.each(dcb,function(cdb,ddb)if bab.isFunction(ddb)then -adb[#adb+1]=cdb end end)if not _db then return bab.sort(adb)end;local bdb=aca(dcb) -if -bdb and bdb.__index then local cdb=bab.functions(bdb.__index)bab.each(cdb,function(ddb,__c) -adb[#adb+1]=__c end)end;return bab.sort(adb)end -function bab.clone(dcb,_db)if not bab.isTable(dcb)then return dcb end;local adb={} -bab.each(dcb,function(bdb,cdb)if -bab.isTable(cdb)then -if not _db then adb[bdb]=bab.clone(cdb,_db)else adb[bdb]=cdb end else adb[bdb]=cdb end end)return adb end;function bab.tap(dcb,_db,...)_db(dcb,...)return dcb end;function bab.has(dcb,_db)return -dcb[_db]~=nil end -function bab.pick(dcb,...)local _db=bab.flatten{...} -local adb={} -bab.each(_db,function(bdb,cdb) -if not bab.isNil(dcb[cdb])then adb[cdb]=dcb[cdb]end end)return adb end -function bab.omit(dcb,...)local _db=bab.flatten{...}local adb={} -bab.each(dcb,function(bdb,cdb)if -not bab.include(_db,bdb)then adb[bdb]=cdb end end)return adb end;function bab.template(dcb,_db) -bab.each(_db or{},function(adb,bdb)if not dcb[adb]then dcb[adb]=bdb end end)return dcb end -function bab.isEqual(dcb,_db,adb) -local bdb=bba(dcb)local cdb=bba(_db)if bdb~=cdb then return false end -if bdb~='table'then return(dcb==_db)end;local ddb=aca(dcb)local __c=aca(_db)if adb then -if -(ddb or __c)and(ddb.__eq or __c.__eq)then return -ddb.__eq(dcb,_db)or __c.__eq(_db,dcb)or(dcb==_db)end end;if bab.size(dcb)~= -bab.size(_db)then return false end;for a_c,b_c in d_b(dcb)do local c_c=_db[a_c] -if 
-bab.isNil(c_c)or not bab.isEqual(b_c,c_c,adb)then return false end end -for a_c,b_c in d_b(_db)do -local c_c=dcb[a_c]if bab.isNil(c_c)then return false end end;return true end -function bab.result(dcb,_db,...) -if dcb[_db]then if bab.isCallable(dcb[_db])then return dcb[_db](dcb,...)else return -dcb[_db]end end;if bab.isCallable(_db)then return _db(dcb,...)end end;function bab.isTable(dcb)return bba(dcb)=='table'end -function bab.isCallable(dcb)return -( -bab.isFunction(dcb)or -(bab.isTable(dcb)and aca(dcb)and aca(dcb).__call~=nil)or false)end -function bab.isArray(dcb)if not bab.isTable(dcb)then return false end;local _db=0 -for adb in -d_b(dcb)do _db=_db+1;if bab.isNil(dcb[_db])then return false end end;return true end -function bab.isIterable(dcb)return bab.toBoolean((dba(d_b,dcb)))end -function bab.isEmpty(dcb)if bab.isNil(dcb)then return true end;if bab.isString(dcb)then -return#dcb==0 end -if bab.isTable(dcb)then return aba(dcb)==nil end;return true end;function bab.isString(dcb)return bba(dcb)=='string'end;function bab.isFunction(dcb)return -bba(dcb)=='function'end;function bab.isNil(dcb) -return dcb==nil end -function bab.isNumber(dcb)return bba(dcb)=='number'end -function bab.isNaN(dcb)return bab.isNumber(dcb)and dcb~=dcb end -function bab.isFinite(dcb)if not bab.isNumber(dcb)then return false end;return -dcb>-cda and dcb<cda end diff --git a/contrib/lua-torch/decisiontree/CartNode.lua b/contrib/lua-torch/decisiontree/CartNode.lua deleted file mode 100644 --- a/contrib/lua-torch/decisiontree/CartNode.lua +++ /dev/null - if self.leftChild then - res = res .. indent .. 'True->' .. self.leftChild:recursivetostring(indent .. ' ') - end - if self.rightChild then - res = res .. indent .. 'False->' .. self.rightChild:recursivetostring(indent .. ' ') - end - end - return res -end - -function CartNode:clone() - return CartNode(self.nodeId, self.leftChild, self.rightChild, self.splitFeatureId, self.splitFeatureValue, self.score, self.splitGain) -end diff --git a/contrib/lua-torch/decisiontree/CartTrainer.lua b/contrib/lua-torch/decisiontree/CartTrainer.lua deleted file mode 100644 index 63ae6c1487..0000000000 --- a/contrib/lua-torch/decisiontree/CartTrainer.lua +++ /dev/null @@ -1,180 +0,0 @@ -local dt = require "decisiontree._env" -local _ = require "moses" - -local CartTrainer = torch.class("dt.CartTrainer", dt) - --- Generic CART trainer -function CartTrainer:__init(dataset, minLeafSize, maxLeafNodes) - assert(torch.isTypeOf(dataset, 'dt.DataSet')) - self.dataset = dataset - self.minLeafSize = assert(minLeafSize) -- min examples per leaf - self.maxLeafNodes = assert(maxLeafNodes) -- max leaf nodes in tree - - -- by default, single thread - self.parallelMode = 'singlethread' -end - -function CartTrainer:train(rootTreeState, activeFeatures) - assert(torch.isTypeOf(rootTreeState, 'dt.TreeState')) - assert(torch.isTensor(activeFeatures)) - local root = dt.CartNode() - root.id = 0 - root.score = rootTreeState:score(self.dataset) - - local nleaf = 1 - - -- TODO : nodeparallel: parallelize here. The queue is a workqueue. - local queue = {} - table.insert(queue, 1, {cartNode=root, treeState=rootTreeState}) - - while #queue > 0 and nleaf < self.maxLeafNodes do - local treeGrowerArgs = table.remove(queue, #queue) - local currentTreeState = treeGrowerArgs.treeState - - -- Note: if minLeafSize = 1 and maxLeafNode = inf, then each example will be its own leaf...
- if self:hasEnoughTrainingExamplesToSplit(currentTreeState.exampleIds:size(1)) then - nleaf = self:processNode(nleaf, queue, treeGrowerArgs.cartNode, currentTreeState, activeFeatures) - end - end - - -- CartTree with random branching (when feature is missing) - local branchleft = function() return math.random() < 0.5 end - return dt.CartTree(root, branchleft), nleaf -end - -function CartTrainer:processNode(nleaf, queue, node, treeState, activeFeatures) - local bestSplit - if self.parallelMode == 'singlethread' then - bestSplit = self:findBestSplitForAllFeatures(treeState, activeFeatures) - elseif self.parallelMode == 'featureparallel' then - bestSplit = self:findBestSplitForAllFeaturesFP(treeState, activeFeatures) - else - error("Unrecognized parallel mode: " .. self.parallelMode) - end - - if bestSplit then - local leftTreeState, rightTreeState = treeState:branch(bestSplit, self.dataset) - assert(bestSplit.leftChildSize + bestSplit.rightChildSize == leftTreeState.exampleIds:size(1) + rightTreeState.exampleIds:size(1), "The left and right subtrees don't match the split found!") - self:setValuesAndCreateChildrenForNode(node, bestSplit, leftTreeState, rightTreeState, nleaf) - - table.insert(queue, 1, {cartNode=node.leftChild, treeState=leftTreeState}) - table.insert(queue, 1, {cartNode=node.rightChild, treeState=rightTreeState}) - - return nleaf + 1 - end - - return nleaf -end - -function CartTrainer:findBestSplitForAllFeatures(treeState, activeFeatures) - local timer = torch.Timer() - local bestSplit = treeState:findBestSplit(self.dataset, activeFeatures, self.minLeafSize, -1, -1) - - if bestSplit then - assert(torch.type(bestSplit) == 'table') - end - - if dt.PROFILE then - print("findBestSplitForAllFeatures time="..timer:time().real) - end - return bestSplit -end - --- Updates the parentNode with the bestSplit information by creating left/right child nodes.
-function CartTrainer:setValuesAndCreateChildrenForNode(parentNode, bestSplit, leftState, rightState, nleaf) - assert(torch.isTypeOf(parentNode, 'dt.CartNode')) - assert(torch.type(bestSplit) == 'table') - assert(torch.isTypeOf(leftState, 'dt.TreeState')) - assert(torch.isTypeOf(rightState, 'dt.TreeState')) - assert(torch.type(nleaf) == 'number') - - local leftChild = dt.CartNode() - leftChild.score = leftState:score(self.dataset) - leftChild.nodeId = 2 * nleaf - 1 - - local rightChild = dt.CartNode() - rightChild.score = rightState:score(self.dataset) - rightChild.nodeId = 2 * nleaf - - parentNode.splitFeatureId = bestSplit.splitId - parentNode.splitFeatureValue = bestSplit.splitValue - parentNode.leftChild = leftChild - parentNode.rightChild = rightChild - parentNode.splitGain = bestSplit.splitGain -end - --- We minimally need 2 * N examples in the parent to satisfy >= N examples per child -function CartTrainer:hasEnoughTrainingExamplesToSplit(count) - return count >= 2 * self.minLeafSize -end - --- call before training to enable feature-parallelization -function CartTrainer:featureParallel(workPool) - assert(self.parallelMode == 'singlethread', self.parallelMode) - self.parallelMode = 'featureparallel' - self.workPool = torch.type(workPool) == 'number' and dt.WorkPool(workPool) or workPool - assert(torch.isTypeOf(self.workPool, 'dt.WorkPool')) - - -- this deletes all SparseTensor hash maps so that they aren't serialized - self.dataset:deleteIndex() - - -- require the dt package - self.workPool:update('require', {libname='decisiontree',varname='dt'}) - -- setup worker store (each worker will have its own copy) - local store = { - dataset=self.dataset, - minLeafSize=self.minLeafSize - } - self.workPool:update('storeKeysValues', store) -end - --- feature parallel -function CartTrainer:findBestSplitForAllFeaturesFP(treeState, activeFeatures) - local timer = torch.Timer() - local bestSplit - if treeState.findBestSplitFP then - bestSplit = treeState:findBestSplitFP(self.dataset, activeFeatures, self.minLeafSize, self.workPool.nThread) - end - - if not bestSplit then - for i=1,self.workPool.nThread do - -- upvalues - local treeState = treeState - local shardId = i - local nShard = self.workPool.nThread - local featureIds = activeFeatures - -- closure - local task = function(store) - assert(store.dataset) - assert(store.minLeafSize) - if treeState.threadInitialize then - treeState:threadInitialize() - end - - local bestSplit = treeState:findBestSplit(store.dataset, featureIds, store.minLeafSize, shardId, nShard) - return bestSplit - end - - self.workPool:writeup('execute', task) - end - - for i=1,self.workPool.nThread do - local taskname, candidateSplit = self.workPool:read() - assert(taskname == 'execute') - if candidateSplit then - if ((not bestSplit) or candidateSplit.splitGain < bestSplit.splitGain) then - bestSplit = candidateSplit - end - end - end - end - - if bestSplit then - assert(torch.type(bestSplit) == 'table') - end - - if dt.PROFILE then - print("findBestSplitForAllFeaturesFP time="..timer:time().real) - end - return bestSplit -end diff --git a/contrib/lua-torch/decisiontree/CartTree.lua b/contrib/lua-torch/decisiontree/CartTree.lua deleted file mode 100644 index c74dfda9ec..0000000000 --- a/contrib/lua-torch/decisiontree/CartTree.lua +++ /dev/null @@ -1,90 +0,0 @@ -local _ = require "moses" -local dt = require 'decisiontree._env' - --- CART (classification-regression decision tree). --- The example is always branched to the left when the splitting feature is missing. 
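To make the branch-on-missing rule described in the comments above concrete, here is a minimal stand-alone scoring walk over plain Lua tables. It is a hypothetical sketch (iterative, with ad-hoc node tables and a fixed default-left policy), not the recursive dt.CartTree implementation that follows:

-- A node is {score=, splitFeatureId=, splitFeatureValue=, leftChild=, rightChild=}.
local function scoreExample(node, input)
   while node.leftChild or node.rightChild do
      local v = input[node.splitFeatureId]
      -- branch left when the feature is missing or below the split threshold
      local goLeft = (v == nil) or (v < node.splitFeatureValue)
      local child = goLeft and node.leftChild or node.rightChild
      node = child or node.leftChild or node.rightChild -- fall through one-child nodes
   end
   return node.score
end

-- e.g. scoreExample({splitFeatureId=3, splitFeatureValue=0.5,
--      leftChild={score=-1}, rightChild={score=1}}, {0, 0, 0.7}) returns 1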
-local CartTree = torch.class("dt.CartTree", "dt.DecisionTree", dt) - -function CartTree:__init(root, branchleft) - assert(torch.isTypeOf(root, 'dt.CartNode')) - self.root = root - self.branchleft = branchleft or function() return true end -end - --- TODO optimize this -function CartTree:score(input, stack, optimized) - if optimized == true and stack == nil and torch.isTensor(input) and input.isContiguous and input:isContiguous() and input:nDimension() == 2 then - return input.nn.CartTreeFastScore(input, self.root, input.new()) - end - return self:recursivescore(self.root, input, stack) -end - --- Continuous: if input[node.splitFeatureId] < node.splitFeatureValue then leftNode else rightNode --- Binary: if input[node.splitFeatureId] == 0 then leftNode else rightNode --- when stack is provided, it is returned as the third argument containing the stack of nodes from root to leaf -function CartTree:recursivescore(node, input, stack) - assert(torch.isTypeOf(node, 'dt.CartNode')) - - if stack then - stack = torch.type(stack) == 'table' and stack or {} - table.insert(stack, node) - end - - if not (node.leftChild or node.rightChild) then - return node.score, node.nodeId, stack - elseif not node.leftChild then - return self:recursivescore(node.rightChild, input, stack) - elseif not node.rightChild then - return self:recursivescore(node.leftChild, input, stack) - end - - local splitId = node.splitFeatureId - local splitVal = node.splitFeatureValue - - if input[splitId] then -- if has key - local featureVal = input[splitId] - local nextNode = featureVal < splitVal and node.leftChild or node.rightChild - return self:recursivescore(nextNode, input, stack) - end - - -- if feature is missing, branch left - local nextNode = self.branchleft() and node.leftChild or node.rightChild - return self:recursivescore(nextNode, input, stack) -end - -function CartTree:__tostring__() - return self.root:recursivetostring() -end - --- expects a stack returned by score -function CartTree:stackToString(stack, input) - assert(torch.type(stack) == 'table') - assert(torch.isTypeOf(stack[1], 'dt.CartNode')) - - local res = 'Stack nodes from root to leaf\n' - for i,node in ipairs(stack) do - if not (node.leftChild or node.rightChild) then - res = res .. "score="..node.score .. '\n' - else - local istr = '' - if input then - istr = '=' .. (input[node.splitFeatureId] or 'nil') - end - res = res .. 'input[' .. node.splitFeatureId .. ']' .. istr ..' < ' .. node.splitFeatureValue .. ' ? ' - res = res .. '(' .. ((node.leftChild and node.rightChild) and 'LR' or node.leftChild and 'L' or node.rightChild and 'R' or 'WAT?') .. ') ' - if node.leftChild == stack[i+1] then - res = res .. 'Left\n' - elseif node.rightChild == stack[i+1] then - res = res .. 'Right\n' - else - error"stackToString error" - end - end - end - return res .. #stack .. " nodes" -end - -function CartTree:clone() - return CartTree(self.root:clone(), self.branchleft) -end - diff --git a/contrib/lua-torch/decisiontree/DFD.lua b/contrib/lua-torch/decisiontree/DFD.lua deleted file mode 100644 index e4746212a7..0000000000 --- a/contrib/lua-torch/decisiontree/DFD.lua +++ /dev/null @@ -1,182 +0,0 @@ --- nn.DFD: Decision Forest Discretizer --- Takes a dense input and outputs a sparse output. --- Each node in the forest is its own feature. --- When a node is traversed, its commensurate feature takes on a value of 1. --- For all non-traversed nodes, the feature is 0. 
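Conceptually, the module turns each dense example into the set of node ids it visits across the forest. A plain-Lua schematic of that encoding follows (hypothetical tree/node tables and a default-left rule for missing features; the real module below operates on a tensorized forest via DFD_computeOutput):

-- Collect the ids of every node an example traverses in each tree.
local function discretize(forest, input)
   local keys = {}
   for _, tree in ipairs(forest) do
      local node = tree.root
      while node do
         keys[#keys + 1] = node.nodeId -- traversed node => feature value 1
         if not (node.leftChild or node.rightChild) then break end
         local v = input[node.splitFeatureId]
         node = (v ~= nil and v >= node.splitFeatureValue) and node.rightChild
                or node.leftChild
      end
   end
   return keys -- sparse output: listed ids are 1, all other node features are 0
end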
-local DFD, parent = torch.class("nn.DFD", "nn.Module") - --- TODO: add :type, as the default will convert the long tensors -function DFD:__init(df, onlyLastNode) - parent.__init(self) - if torch.type(df) == 'table' then - self:reconstructFromInfo(df) - else - assert(torch.type(df) == 'dt.DecisionForest') - - self.rootIds = torch.LongTensor() - -- nodeId of left and right child nodes - self.leftChild = torch.LongTensor() - self.rightChild = torch.LongTensor() - -- index and value of the feature that splits this node - self.splitFeatureId = torch.LongTensor() - self.splitFeatureValue = torch.Tensor() - -- initialize state given df - self:convertForest2Tensors(df) - self:clearState() - end - self.onlyLastNode = onlyLastNode - self.nTrees = self.rootIds:size(1) -end - --- converts a DecisionForest to efficient tensor representation -function DFD:convertForest2Tensors(df) - self.rootIds:resize(#df.trees) - - -- nodeId will map to featureId - local nodeId = 0 - -- sets nodeIds of all subnodes - -- and measures the maximum depth over all trees - local function recursiveTree(node, depth) - depth = (depth or 0) + 1 - local rdepth = depth - nodeId = nodeId + 1 - node._nodeId = nodeId - - if node.leftChild then - rdepth = math.max(rdepth, recursiveTree(node.leftChild, depth)) - end - if node.rightChild then - rdepth = math.max(rdepth, recursiveTree(node.rightChild, depth)) - end - return rdepth - end - - -- sum over trees of max depth - self.depth = 0 - for i,tree in ipairs(df.trees) do - assert(torch.isTypeOf(tree.root, 'dt.CartNode')) - self.depth = self.depth + recursiveTree(tree.root) - end - -- remove roots from depth - self.depth = self.depth - self.rootIds:size(1) - - -- total number of nodes in all trees - self.nNode = nodeId - - -- nodeId of left and right child nodes - self.leftChild:resize(self.nNode):fill(-1) - self.rightChild:resize(self.nNode):fill(-1) - -- index and value of the feature that splits this node - self.splitFeatureId:resize(self.nNode):fill(-1) - self.splitFeatureValue:resize(self.nNode):fill(-1) - - -- aggregates CartNode attributes to an efficient tensor representation - local function recursiveTree2(node) - local nodeId = assert(node._nodeId) - assert(self.splitFeatureId[nodeId] == -1) - - if node.leftChild then - self.leftChild[nodeId] = assert(node.leftChild._nodeId) - recursiveTree2(node.leftChild) - else - self.leftChild[nodeId] = 0 - end - - if node.rightChild then - self.rightChild[nodeId] = assert(node.rightChild._nodeId) - recursiveTree2(node.rightChild) - else - self.rightChild[nodeId] = 0 - end - - -- each node splits the dataset on a feature id-value pair - self.splitFeatureId[nodeId] = assert(node.splitFeatureId) - self.splitFeatureValue[nodeId] = assert(node.splitFeatureValue) - end - - for i,tree in ipairs(df.trees) do - self.rootIds[i] = assert(tree.root._nodeId) - recursiveTree2(tree.root) - end - - assert(self.leftChild:min() >= 0) - assert(self.rightChild:min() >= 0) -end - --- input is a batchsize x inputsize tensor -function DFD:updateOutput(input) - assert(torch.isTensor(input)) - assert(input:dim() == 2) - input = input:contiguous() - - local batchsize, inputsize = input:size(1), input:size(2) - local size = self.onlyLastNode and self.nTree or self.depth - - -- each sample's output keys is resized to maxdepth, which is the maximum size that it can take on - self.outputkeys = self.outputkeys or torch.LongTensor() - self.outputkeys:resize(batchsize, size) - -- values are 1 - self.outputvalues = self.outputvalues or input.new() - 
self.outputvalues:resize(batchsize, size):fill(1) - - self.output = input.nn.DFD_computeOutput(self.outputkeys, self.outputvalues, self.rootIds, self.leftChild, self.rightChild, self.splitFeatureId, self.splitFeatureValue, input, self.onlyLastNode) - return self.output -end - -function DFD:type(type, tensorCache) - if type then - local info = self:getReconstructionInfo() - for k, v in pairs(info) do - if torch.type(v) ~= 'torch.LongTensor' then - info[k] = nil - end - end - parent.type(self, type, tensorCache) - self:reconstructFromInfo(info) - return self - else - return parent.type(self) - end -end - -function DFD:updateGradInput() - error"Not Implemented" -end - -function DFD:clearState() - self.output = {{},{}} - self.taskbuffer = {} - self.outputkeys = nil - self.outputvalues = nil - self._range = nil - self._indices = nil - self._mask = nil -end - -function DFD:reconstructFromInfo(DFDinfo) - for k,v in pairs(DFDinfo) do - self[k] = v - end - assert(self.leftChild:nDimension() == 1) - assert(self.rightChild:nDimension() == 1) - assert(self.leftChild:size(1) == self.nNode) - assert(self.rightChild:size(1) == self.nNode) - assert(self.leftChild:min() >= 0) - assert(self.rightChild:min() >= 0) - assert(self.splitFeatureId:nDimension() == 1) - assert(self.splitFeatureValue:nDimension() == 1) - assert(self.splitFeatureId:size(1) == self.splitFeatureValue:size(1)) -end - -function DFD:getReconstructionInfo() - local DFDinfo = { - nNode = self.nNode, - rootIds = self.rootIds, - leftChild = self.leftChild, - rightChild = self.rightChild, - splitFeatureId = self.splitFeatureId, - splitFeatureValue = self.splitFeatureValue, - depth = self.depth - } - return DFDinfo -end diff --git a/contrib/lua-torch/decisiontree/DataSet.lua b/contrib/lua-torch/decisiontree/DataSet.lua deleted file mode 100644 index 15058a7c6a..0000000000 --- a/contrib/lua-torch/decisiontree/DataSet.lua +++ /dev/null @@ -1,142 +0,0 @@ -local dt = require "decisiontree._env" - -local DataSet = torch.class("dt.DataSet", dt) - -function DataSet:__init(input, target, nThreads) - if torch.type(input) == 'table' then - assert(torch.isTypeOf(input[1], 'torch.SparseTensor')) - else - assert(torch.isTensor(input)) - end - self.input = input - assert(torch.isTensor(target)) - self.target = target - self.nThreads = nThreads or 1 - - self.sortedFeatureValues, self.featureIds = self:sortFeatureValues(input) -end - --- group examples by featureId. For each featureId, sort examples by featureValue (ascending order) --- returns a table mapping featureIds to sorted lists of exampleIds --- e.g. 
{featureId={example1,example2,example3}} -function DataSet:sortFeatureValues(inputs) - local isSparse = torch.typename(inputs[1]):match('torch.*SparseTensor') - assert(isSparse or torch.isTensor(inputs)) - - local featureIds = torch.LongTensor() - local dataset = {} -- TODO use tds.Hash (will require SparseTensor to be userdata) - if isSparse then - local proto = inputs[1].values - -- get list of featureIds - local featureMap = {} - for i,input in ipairs(inputs) do - input.keys:apply(function(key) - featureMap[key] = (featureMap[key] or 0) + 1 - end) - end - local _ = require "moses" - featureIds = featureIds.new(_.keys(featureMap)) - local featureCounts = torch.LongTensor(featureIds:size(1)) - for i=1,featureIds:size(1) do - featureCounts[i] = featureMap[featureIds[i]] - end - - for i=1,featureIds:size(1) do - local featureId = featureIds[i] - local featureCount = featureCounts[i] - dataset[featureId] = { - values=proto.new(featureCount), - examples=torch.LongTensor(featureCount), - i=0 - } - end - - for exampleId,input in ipairs(inputs) do - local sparseIdx = 0 - input.keys:apply(function(key) - sparseIdx = sparseIdx + 1 - local f = dataset[key] - f.i = f.i + 1 - f.values[f.i] = input.values[sparseIdx] - f.examples[f.i] = exampleId - end) - end - - local sortVal, sortIdx = proto.new(), torch.LongTensor() - for featureId,f in pairs(dataset) do - assert(f.values:size(1) == f.i) - sortVal:sort(sortIdx, f.values, 1, false) - - local sortedExampleIds = torch.LongTensor(f.i) - sortedExampleIds:index(f.examples, 1, sortIdx) - - dataset[featureId] = sortedExampleIds - end - else - assert(torch.isTensor(inputs)) - featureIds:range(1,inputs:size(2)) - - for i=1,inputs:size(2) do - local featureId = i - local values = inputs:select(2, i) - local _, sortedFeatureExampleIds = values:sort(1, false) - dataset[featureId] = sortedFeatureExampleIds - end - end - - return dataset, featureIds -end - -function DataSet:getSortedFeature(featureId) - assert(self.sortedFeatureValues) - return self.sortedFeatureValues[featureId] -end - -function DataSet:size() - return self.target:size(1) -end - -function DataSet:getExampleIds() - if not self.exampleIds then - self.exampleIds = torch.LongTensor():range(1,self:size()) - end - return self.exampleIds -end - -function DataSet:countPositive(exampleIds) - assert(torch.type(exampleIds) == 'torch.LongTensor') - local dt = require 'decisiontree' - local buffer = dt.getBufferTable('DataSet') - buffer.tensor = buffer.tensor or self.target.new() - buffer.tensor:index(self.target, 1, exampleIds) - local nPositive = 0 - buffer.tensor:apply(function(x) - if x > 0 then nPositive = nPositive + 1 end - end) - return nPositive -end - -function DataSet:initScore() - self.score = self.score or torch.Tensor() - self.score:resize(self:size()):fill(0) -end - -function DataSet:buildIndex() - if torch.type(self.input) == 'table' then - for exampleId,input in ipairs(self.input) do - if torch.isTypeOf(input, 'torch.SparseTensor') then - input:buildIndex() - end - end - end -end - -function DataSet:deleteIndex() - if torch.type(self.input) == 'table' then - for exampleId,input in ipairs(self.input) do - if torch.isTypeOf(input, 'torch.SparseTensor') then - input:deleteIndex() - end - end - end -end diff --git a/contrib/lua-torch/decisiontree/DecisionForest.lua b/contrib/lua-torch/decisiontree/DecisionForest.lua deleted file mode 100644 index cac748e7e8..0000000000 --- a/contrib/lua-torch/decisiontree/DecisionForest.lua +++ /dev/null @@ -1,82 +0,0 @@ -local dt = require "decisiontree._env" - 
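Returning to DataSet:sortFeatureValues above: for a dense input it amounts to an argsort of each feature column. A hedged sketch with plain tables standing in for torch tensors:

-- For each feature (column), list example ids by ascending feature value.
local inputs = { {0.9, 0.1}, {0.2, 0.5}, {0.4, 0.3} } -- 3 examples, 2 features

local function sortFeatureValues(inputs)
   local dataset = {}
   for featureId = 1, #inputs[1] do
      local ids = {}
      for exampleId = 1, #inputs do ids[exampleId] = exampleId end
      table.sort(ids, function(a, b)
         return inputs[a][featureId] < inputs[b][featureId]
      end)
      dataset[featureId] = ids
   end
   return dataset
end

-- sortFeatureValues(inputs)[1] is {2, 3, 1}: example 2 has the smallest
-- value (0.2) of feature 1, then example 3 (0.4), then example 1 (0.9).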
--- Decision forest that ensembles a bag of decision trees. -local DecisionForest = torch.class("dt.DecisionForest", "dt.DecisionTree", dt) - -function DecisionForest:__init(trees, weight, bias) - assert(torch.type(trees) == 'table') - self.trees = trees - if #trees == 0 then - self.weight = weight or torch.Tensor() - assert(torch.isTensor(self.weight)) - assert(self.weight:nElement() == 0) - else - assert(torch.isTypeOf(trees[1], 'dt.DecisionTree')) - self.weight = weight or torch.Tensor(#trees):fill(1) - assert(torch.isTensor(self.weight)) - assert(self.weight:dim() == 1) - assert(self.weight:min() >= 0, "Expecting positive weights") - assert(#trees == self.weight:size(1)) - end - - self.bias = bias or 0 - assert(torch.type(self.bias) == 'number') -end - -function DecisionForest:score(input, incrementalId) - assert(torch.isTensor(input)) - - local buffer = {} - if incrementalId then - self.buffers = self.buffers or {} - self.buffers[incrementalId] = self.buffers[incrementalId] or {} - buffer = self.buffers[incrementalId] - end - buffer.initialCounter = buffer.initialCounter or 0 - - -- TODO: score in parallel - local output - if torch.isTensor(input) and input.isContiguous and input:isContiguous() and input:nDimension() == 2 then - buffer.output = buffer.output or input.new() - output = buffer.output - assert(output:nElement() == 0 or output:size(1) == input:size(1)) - if output:nElement() == 0 then - output:resize(input:size(1)):fill(self.bias) - end - for i,tree in ipairs(self.trees) do - if i > buffer.initialCounter then - local score = tree:score(input, nil, true) - output:add(self.weight[i], score) - end - end - else - output = buffer.output or self.bias - for i,tree in ipairs(self.trees) do - if i > buffer.initialCounter then - output = output + tree:score(input) * self.weight[i] - end - end - buffer.output = output - end - - buffer.initialCounter = #self.trees - - return output -end - -function DecisionForest:add(tree, weight) - assert(torch.type(weight) == 'number') - assert(weight > 0) - table.insert(self.trees, tree) - self.weight:resize(#self.trees) - self.weight[#self.trees] = weight - return self -end - -function DecisionForest:clone() - local trees = {} - for i, tree in ipairs(self.trees) do - trees[i] = tree:clone() - end - return DecisionForest(trees, self.weight:clone(), self.bias) -end diff --git a/contrib/lua-torch/decisiontree/DecisionForestTrainer.lua b/contrib/lua-torch/decisiontree/DecisionForestTrainer.lua deleted file mode 100644 index fc903678ba..0000000000 --- a/contrib/lua-torch/decisiontree/DecisionForestTrainer.lua +++ /dev/null @@ -1,22 +0,0 @@ -local dt = require "decisiontree._env" - --- Interface for all decisionForestTrainers -local DFT = torch.class("dt.DecisionForestTrainer", dt) - --- Train a DecisionForest with examples, a table of valid featureIds and a dataset (i.e. sortedExamplesByFeatureId) -function DFT:train(examples, validFeatureIds, dataset) - assert(torch.type(examples) == "table") - assert(torch.isTypeOf(examples[1], "dt.LabeledExample")) - - assert(torch.type(validFeatureIds) == 'table') - - assert(torch.type(dataset) == 'table') - for k,v in pairs(dataset) do - assert(torch.type(v) == 'table') - assert(torch.isTypeOf(v[1], 'dt.LabeledExample')) - break - end - -- dataset is a table mapping featureIds to sorted lists of LabeledExamples - -- e.g. 
{featureId={example1,example2,example3}} - error"Not Implemented" -end diff --git a/contrib/lua-torch/decisiontree/DecisionTree.lua b/contrib/lua-torch/decisiontree/DecisionTree.lua deleted file mode 100644 index c61bc37577..0000000000 --- a/contrib/lua-torch/decisiontree/DecisionTree.lua +++ /dev/null @@ -1,12 +0,0 @@ -local dt = require "decisiontree._env" - --- An interface for decision trees. -local DecisionTree = torch.class("dt.DecisionTree", dt) - --- Score an input example and return the prediction score. --- input is a Tensor or SparseTensor --- return prediction score and nodeId -function DecisionTree:score(input) - error"Not Implemented" - return score, nodeId -end diff --git a/contrib/lua-torch/decisiontree/GBDT_common.h b/contrib/lua-torch/decisiontree/GBDT_common.h deleted file mode 100644 index eb993702d6..0000000000 --- a/contrib/lua-torch/decisiontree/GBDT_common.h +++ /dev/null @@ -1,106 +0,0 @@ -#include "khash.h" -#include - -#define computeGradientBoostLoss(g, h) (-(g)*(g)/(h)) - -// we use khash to make iteration faster than lua tables -KHASH_SET_INIT_INT64(long) - -// defines the data we need for running an instance of the algorithm and its constructor/destructor -typedef struct { - khash_t(long)* exampleMap; - THLongTensor *exampleIdsWithFeature_cache; - long minLeafSize; -} GBRunData; - - -// allocates data that cannot be shared between threads -static void gb_local_create_run_data(GBRunData *run_data) { - run_data->exampleMap = kh_init(long); - run_data->exampleIdsWithFeature_cache = THLongTensor_new(); -} - -static void gb_create_run_data(GBRunData *run_data, int minLeafSize) { - gb_local_create_run_data(run_data); - run_data->minLeafSize = minLeafSize; -} - -static void gb_destroy_run_data(GBRunData *run_data) { - THLongTensor_free(run_data->exampleIdsWithFeature_cache); - kh_destroy(long, run_data->exampleMap); -} - -// initializes the data required by the optimizer for the given feature. -static THLongTensor *gb_internal_prepare(lua_State *L, THLongTensor *exampleIds, - THLongTensor *exampleIdsWithFeature_cache, int input_index, long feature_id, - khash_t(long)* exampleMap) { - long *exampleIds_data = THLongTensor_data(exampleIds); - long exampleIds_size = THLongTensor_size(exampleIds, 0); - - int ret = 0; - - // if the input is a table, then we have a sparse dataset - if (lua_istable(L, input_index)) { - if (exampleIds_size == 0) { - return NULL; - } - else { - // loops over the examples' ids that this node has to evaluate and, if they have the feature - // we're looking for, marks them as present and stores them in the order provided by the - // dataset - THLongTensor_resize1d(exampleIdsWithFeature_cache, exampleIds_size); - kh_clear(long, exampleMap); - kh_resize(long, exampleMap, exampleIds_size*8); - long *exampleIdsWithFeature_data = THLongTensor_data(exampleIdsWithFeature_cache); - long j = 0; - // for each sample to be evaluated - for (long i = 0; i < exampleIds_size; i++) { - // gets the representation for the example - lua_pushinteger(L, exampleIds_data[i]); - lua_gettable(L, input_index); - - // builds the index, which happens only once per thread for efficiency - lua_pushstring(L, "buildIndex"); - lua_gettable(L, -2); - lua_pushvalue(L, -2); - lua_call(L, 1, 0); - - // tries to get the feature for this sample - lua_pushinteger(L, feature_id); - lua_gettable(L, -2); - // if present, then...
- if (!lua_isnil(L, -1)) { - // saves the example - exampleIdsWithFeature_data[j] = exampleIds_data[i]; - j++; - - // marks it as present in the hash table - kh_put(long, exampleMap, exampleIds_data[i], &ret); - } - - lua_pop(L, 2); - } - - // resizes to fit only the samples that have the feature - THLongTensor_resize1d(exampleIdsWithFeature_cache, j); - kh_resize(long, exampleMap, j*8); - return exampleIdsWithFeature_cache; - } - } - else { - // if the input isn't a table, then it's dense and we cannot have exampleIds missing, so it - // depends on feature_id - // since exampleIds is fixed between calls and this is going to store the same values to the - // same position, we can cache it between calls - if (kh_size(exampleMap) == 0) { - kh_resize(long, exampleMap, exampleIds_size*8); - for (long i = 0; i < exampleIds_size; i++) { - kh_put(long, exampleMap, exampleIds_data[i], &ret); - } - } - // notice that we just return the given tensor of ids instead of copying it. the rest of the - // code handles this transparently - return exampleIds; - } -} - diff --git a/contrib/lua-torch/decisiontree/GiniState.lua b/contrib/lua-torch/decisiontree/GiniState.lua deleted file mode 100644 index 6dfed28452..0000000000 --- a/contrib/lua-torch/decisiontree/GiniState.lua +++ /dev/null @@ -1,54 +0,0 @@ -local dt = require 'decisiontree._env' - --- used by RandomForestTrainer -local GiniState, parent = torch.class("dt.GiniState", "dt.TreeState", dt) - -function GiniState:__init(exampleIds) - parent.__init(self, exampleIds) - self.nPositiveInLeftBranch = 0 - self.nPositiveInRightBranch = 0 -end - -function GiniState:score(dataset) - local dt = require 'decisiontree' - local nPositive = dataset:countPositive(self.exampleIds) - return dt.calculateLogitScore(nPositive, self.exampleIds:size(1)) -end - -function GiniState:initialize(exampleIdsWithFeature, dataset) - assert(torch.type(exampleIdsWithFeature) == 'torch.LongTensor') - assert(torch.isTypeOf(dataset, 'dt.DataSet')) - self.nPositiveInLeftBranch = dataset:countPositive(exampleIdsWithFeature) - self.nPositiveInRightBranch = 0 - - self.nExampleInLeftBranch = exampleIdsWithFeature:size(1) - self.nExampleInRightBranch = 0 -end - -function GiniState:update(exampleId, dataset) - assert(torch.type(exampleId) == 'number') - assert(torch.isTypeOf(dataset, 'dt.DataSet')) - if dataset.target[exampleId] > 0 then - self.nPositiveInLeftBranch = self.nPositiveInLeftBranch - 1 - self.nPositiveInRightBranch = self.nPositiveInRightBranch + 1 - end - - self.nExampleInLeftBranch = self.nExampleInLeftBranch - 1 - self.nExampleInRightBranch = self.nExampleInRightBranch + 1 -end - -function GiniState:computeSplitInfo(splitFeatureId, splitFeatureValue) - local dt = require 'decisiontree' - local gini = dt.computeGini(self.nExampleInLeftBranch, self.nPositiveInLeftBranch, self.nExampleInRightBranch, self.nPositiveInRightBranch) - local splitInfo = { - splitId = assert(splitFeatureId), - splitValue = assert(splitFeatureValue), - leftChildSize = assert(self.nExampleInLeftBranch), - leftPositiveCount = assert(self.nPositiveInLeftBranch), - rightChildSize = assert(self.nExampleInRightBranch), - rightPositiveCount = assert(self.nPositiveInRightBranch), - gini = assert(gini), - splitGain = gini - } - return splitInfo -end \ No newline at end of file diff --git a/contrib/lua-torch/decisiontree/GradientBoostState.lua b/contrib/lua-torch/decisiontree/GradientBoostState.lua deleted file mode 100644 index f268f3da80..0000000000 --- a/contrib/lua-torch/decisiontree/GradientBoostState.lua 
+++ /dev/null @@ -1,57 +0,0 @@ -local dt = require 'decisiontree._env' - -local GradientBoostState, parent = torch.class("dt.GradientBoostState", "dt.TreeState", dt) - -function GradientBoostState:__init(exampleIds, gradInput, hessInput) - parent.__init(self, exampleIds) - self.gradInput = gradInput - self.hessInput = hessInput -end - -function GradientBoostState:score(dataset) - local dt = require 'decisiontree' - local gradInput = self.gradInput:index(1, self.exampleIds) - local hessInput = self.hessInput:index(1, self.exampleIds) - return dt.computeNewtonScore(gradInput:sum(), hessInput:sum()) -end - --- calls _branch and encapsulates the left and right exampleIds into a TreeStates -function GradientBoostState:branch(splitInfo, dataset) - local leftExampleIds, rightExampleIds = self:_branch(splitInfo, dataset) - return self.new(leftExampleIds, self.gradInput, self.hessInput), self.new(rightExampleIds, self.gradInput, self.hessInput) -end - --- Partitions self given a splitInfo table, producing a pair of exampleIds corresponding to the left and right subtrees. -function GradientBoostState:_branch(splitInfo, dataset) - local input = dataset.input - -- if the input is dense, we can use the optimized version - if torch.isTensor(input) and input.isContiguous and input:isContiguous() and input:nDimension() == 2 then - return input.nn.GBDT_branch(splitInfo, input, self.exampleIds) - end - return parent._branch(self, splitInfo, dataset) -end - --- The following methods are supersets of each other. You can comment out them to re-use the lua --- version with just the provided core optimized - --- THIS ONE CANNOT BE COMMENTED OUT -function GradientBoostState:findBestFeatureSplit(dataset, featureId, minLeafSize) - local ret = self.hessInput.nn.GBDT_findBestFeatureSplit(self.exampleIds, dataset, featureId, minLeafSize, self.gradInput, self.hessInput) - return ret -end - --- finds the best split of examples in treeState among featureIds -function GradientBoostState:findBestSplit(dataset, featureIds, minLeafSize, shardId, nShard) - local ret = self.hessInput.nn.GBDT_findBestSplit(self.exampleIds, dataset, featureIds, minLeafSize, shardId, nShard, self.gradInput, self.hessInput) - return ret -end - --- finds the best split like the previous one, but performs feature parallelism. 
Note that the --- optimization is only applied if the input is dense -function GradientBoostState:findBestSplitFP(dataset, featureIds, minLeafSize, nThread) - local input = dataset.input - if torch.isTensor(input) and input.isContiguous and input:isContiguous() and input:nDimension() == 2 then - local ret = self.hessInput.nn.GBDT_findBestSplitFP(self.exampleIds, dataset, featureIds, minLeafSize, self.gradInput, self.hessInput, nThread) - return ret - end -end diff --git a/contrib/lua-torch/decisiontree/GradientBoostTrainer.lua b/contrib/lua-torch/decisiontree/GradientBoostTrainer.lua deleted file mode 100644 index 51299b109e..0000000000 --- a/contrib/lua-torch/decisiontree/GradientBoostTrainer.lua +++ /dev/null @@ -1,244 +0,0 @@ -local dt = require "decisiontree._env" - --- Gradient boosted decision tree trainer -local GradientBoostTrainer = torch.class("dt.GradientBoostTrainer", "dt.DecisionForestTrainer", dt) - -function GradientBoostTrainer:__init(opt) - assert(torch.type(opt) == 'table') - - assert(torch.isTypeOf(opt.treeTrainer, 'dt.CartTrainer')) - self.treeTrainer = opt.treeTrainer - - assert(torch.isTypeOf(opt.lossFunction, 'nn.Criterion')) - self.lossFunction = opt.lossFunction - - assert(torch.type(opt.shrinkage) == 'number') - assert(opt.shrinkage > 0) - self.shrinkage = opt.shrinkage - - assert(torch.type(opt.downsampleRatio) == 'number') - assert(opt.downsampleRatio > 0) - self.downsampleRatio = opt.downsampleRatio - - assert(torch.type(opt.nTree) == 'number') - assert(opt.nTree > 0) - self.nTree = opt.nTree - - evalFreq = evalFreq or -1 - assert(torch.type(opt.evalFreq) == 'number') - assert(torch.round(opt.evalFreq) == opt.evalFreq) - self.evalFreq = opt.evalFreq - - -- when non-positive, no early-stopping - earlyStop = earlyStop or (evalFreq-1) - assert(torch.type(opt.earlyStop) == 'number') - self.earlyStop = opt.earlyStop - - -- when non-positive, defaults to sqrt(#feature) - assert(torch.type(opt.featureBaggingSize) == 'number') - self.featureBaggingSize = opt.featureBaggingSize - - if opt.decisionForest then - assert(torch.isTypeOf(opt.decisionForest, 'dt.DecisionForest')) - end - self.decisionForest = opt.decisionForest - - self.useInitBias = opt.useInitBias -end - -function GradientBoostTrainer:computeBias(trainSet, verbose) - assert(torch.isTypeOf(trainSet, 'dt.DataSet')) - - if verbose then print("Use new bias generated from the training examples.") end - - return -0.5 * self.gradInput:sum() / self.hessInput:sum() -end - - -function GradientBoostTrainer:initialize(trainSet, verbose) - assert(torch.isTypeOf(trainSet, 'dt.DataSet')) - - trainSet:initScore() - self.gradInput, self.hessInput = self.lossFunction:backward2(trainSet.score, trainSet.target) - - -- used for early-stopping (see validate()) - self.stopCount = 0 - self.prevTrainLoss = math.huge - self.prevTestLoss = math.huge - - if verbose then print("Processing initial decision forest") end - - local decisionForest, bias - - if self.decisionForest then - local bias = self.useInitBias and self.decisionForest.bias or self:computeBias(trainSet, verbose) - - decisionForest = dt.DecisionForest(self.decisionForest.trees, self.decisionForest.weight, bias) - - local input = trainSet.input - if torch.isTensor(input) and input.isContiguous and input:isContiguous() then - score = decisionForest:score(input) - else - score:resize(trainSet:size()) - for exampleId=1,trainSet:size() do - score[exampleId] = decisionForest:score(input[exampleId]) - end - end - else - local bias = self:computeBias(trainSet, verbose) - 
decisionForest = dt.DecisionForest({}, torch.Tensor(), bias) - - trainSet.score:fill(bias) - end - - if verbose then print("Finish loading initial decision forest") end - - return decisionForest -end - --- Trains a decision forest of boosted decision trees. --- examples are the training examples. validExamples are used for cross-validation. -function GradientBoostTrainer:train(trainSet, featureIds, validSet, verbose) - assert(torch.isTypeOf(trainSet, 'dt.DataSet')) - assert(torch.type(featureIds) == 'torch.LongTensor') - assert(torch.isTypeOf(validSet, 'dt.DataSet')) - - local decisionForest = self:initialize(trainSet, verbose) - local bestDecisionForest - - if verbose then print(string.format("Get %d featureIds.", featureIds:size(1))) end - - local baggingSize = self.featureBaggingSize > 0 and self.featureBaggingSize or torch.round(math.sqrt(featureIds:size(1))) - local trainExampleIds = trainSet:getExampleIds() - local baggingIndices, activeFeatures - local treeExampleIds - - local timer = torch.Timer() - - for treeId = 1,self.nTree do - timer:reset() - if verbose then print(string.format("Begin processing tree number %d of %d", treeId, self.nTree)) end - - -- Get active features - activeFeatures = activeFeatures or torch.LongTensor() - if baggingSize < featureIds:size(1) then - if verbose then print(string.format("Tree %d: Bagging %d from %d features", treeId, baggingSize, featureIds:size(1))) end - - baggingIndices = baggingIndices or torch.LongTensor() - baggingIndices:randperm(featureIds:size(1)) - activeFeatures:index(featureIds, 1, baggingIndices:narrow(1,1,baggingSize)) - else - activeFeatures = featureIds - end - - -- Get data samples - if self.downsampleRatio < 0.99 then - local sampleSize = torch.round(trainSet:size() * self.downsampleRatio) - - if verbose then print(string.format("Tree %d: Downsampling %d of %d samples", treeId, sampleSize, trainSet:size())) end - - baggingIndices = baggingIndices or torch.LongTensor() - baggingIndices:randperm(trainSet:size()) - - treeExampleIds = treeExampleIds or torch.LongTensor() - treeExampleIds:index(trainExampleIds, 1, baggingIndices:narrow(1,1,sampleSize)) - else - treeExampleIds = trainExampleIds - end - - if verbose then print(string.format("Tree %d: training CART tree", treeId)) end - - local rootTreeState = dt.GradientBoostState(treeExampleIds, self.gradInput, self.hessInput) - local cartTree = self.treeTrainer:train(rootTreeState, activeFeatures) - - if verbose then print(string.format("Tree %d: finished training CART tree in %f seconds", treeId, timer:time().real)) end - - decisionForest:add(cartTree, self.shrinkage) - - -- update score - local predictionScore - local input = trainSet.input - if torch.isTensor(input) and input:isContiguous() then - predictionScore = cartTree:score(trainSet.input, nil, true) - else - local size = trainSet:size() - predictionScore = torch.Tensor(size) - for exampleId=1,size do - predictionScore[exampleId] = cartTree:score(trainSet.input[exampleId]) - end - end - trainSet.score:add(self.shrinkage, predictionScore) - self.gradInput, self.hessInput = self.lossFunction:backward2(trainSet.score, trainSet.target) - - if verbose then print(string.format("Tree %d: training complete in %f seconds", treeId, timer:time().real)) end - - -- cross-validation/early-stopping - if self.evalFreq > 0 and treeId % self.evalFreq == 0 then - timer:reset() - local stop, validLoss, bestDecisionForest = self:validate(trainSet, validSet, decisionForest, bestDecisionForest) - if dt.PROFILE then print("validate tree time: 
"..timer:time().real) end - if verbose then print(string.format("Loss: train=%7.4f, valid=%7.4f", 0, validLoss)) end - if stop then - if verbose then print(string.format("GBDT early stopped on tree %d", treeId)) end - break - end - - end - end - - return bestDecisionForest or decisionForest -end - -function dt.GradientBoostTrainer:validate(trainSet, validSet, decisionForest, bestDecisionForest) - assert(torch.isTypeOf(trainSet, 'dt.DataSet')) - assert(torch.isTypeOf(validSet, 'dt.DataSet')) - assert(torch.isTypeOf(decisionForest, 'dt.DecisionForest')) - assert(not bestDecisionForest or torch.isTypeOf(decisionForest, 'dt.DecisionForest')) - - -- buffer - local buffer = dt.getBufferTable('GradientBoost') - buffer.tensor = buffer.tensor or trainSet.score.new() - local score = buffer.tensor - - -- per thread loss function (tensors are shared) - local lossname = torch.typename(self.lossFunction) - buffer[lossname] = buffer[lossname] or self.lossFunction:clone() - local lossFunction = buffer[lossname] - - -- TODO batch this for large datasets - local input = validSet.input - if torch.isTensor(input) and input.isContiguous and input:isContiguous() then - score = decisionForest:score(input, 'val') - else - score:resize(validSet:size()) - for exampleId=1,validSet:size() do - score[exampleId] = decisionForest:score(input[exampleId], 'val') - end - end - local validLoss = lossFunction:forward(score, validSet.target) - - -- early stop is not enabled when earlyStop=0 - local stop = false - if self.earlyStop > 0 then - -- Track test loss and detect early stop - if self.prevTestLoss - validLoss < 0 then - self.stopCount = self.stopCount + 1 - else - bestDecisionForest = decisionForest:clone() - self.stopCount = 0 - end - - stop = self.stopCount >= self.earlyStop - end - - self.prevTestLoss = validLoss - - return stop, validLoss, bestDecisionForest -end - -function GradientBoostTrainer:getName() - return string.format( - "gbdt-dRatio-%s-maxLeaf-%s-minExample-%s-nTree-%s-shrinkage-%s", - self.downsampleRatio, self.maxLeafNodes, self.minLeafSize, self.nTree, self.shrinkage - ) -end diff --git a/contrib/lua-torch/decisiontree/LICENSE b/contrib/lua-torch/decisiontree/LICENSE deleted file mode 100644 index 8dada3edaf..0000000000 --- a/contrib/lua-torch/decisiontree/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/contrib/lua-torch/decisiontree/LogitBoostCriterion.lua b/contrib/lua-torch/decisiontree/LogitBoostCriterion.lua deleted file mode 100644 index 5b9eb60287..0000000000 --- a/contrib/lua-torch/decisiontree/LogitBoostCriterion.lua +++ /dev/null @@ -1,45 +0,0 @@ -local dt = require "decisiontree._env" - --- Ref: slide 17 in https://homes.cs.washington.edu/~tqchen/pdf/BoostedTree.pdf - --- equivalent to nn.Sigmoid() + nn.BCECriterion() -local LogitBoostCriterion, parent = torch.class("nn.LogitBoostCriterion", "nn.Criterion") - -function LogitBoostCriterion:__init(sizeAverage) - parent.__init(self) - self.sizeAverage = sizeAverage - self.hessInput = self.gradInput.new() - self._output = torch.Tensor() -end - -function LogitBoostCriterion:updateOutput(input, target) - input.nn.LogitBoostCriterion_updateOutput(input, target, self._output, self.sizeAverage) - self.output = self._output[1] - return self.output -end - -function LogitBoostCriterion:updateGradInput(input, target) - input.nn.LogitBoostCriterion_updateGradInput(input, target, self.gradInput) - return self.gradInput -end - -function LogitBoostCriterion:updateHessInput(input, target) - input.nn.LogitBoostCriterion_updateHessInput(input, target, self.hessInput) - return self.hessInput -end - --- returns gradInput and hessInput -function LogitBoostCriterion:backward2(input, target) - return self:updateGradInput(input, target), self:updateHessInput(input, target) -end - -local gradWrapper = function(input, target, grad) - input.nn.LogitBoostCriterion_updateGradInput(input, target, grad) -end -local hessianWrapper = function(input, target, hessian) - input.nn.LogitBoostCriterion_updateHessInput(input, target, hessian) -end - -function LogitBoostCriterion:getWrappers() - return gradWrapper, hessianWrapper -end diff --git a/contrib/lua-torch/decisiontree/MSECriterion.lua b/contrib/lua-torch/decisiontree/MSECriterion.lua deleted file mode 100644 index 948c1a17e0..0000000000 --- a/contrib/lua-torch/decisiontree/MSECriterion.lua +++ /dev/null @@ -1,13 +0,0 @@ -local dt = require "decisiontree._env" - --- named updateHessInput so that backward2 below resolves it -function nn.MSECriterion.updateHessInput(self, input, target) - self.hessInput = self.hessInput or input.new() - self.hessInput:resize(input:size()):fill(2) - return self.hessInput -end - --- returns gradInput and hessInput -function nn.MSECriterion.backward2(self, input, target) - return self:updateGradInput(input, target), self:updateHessInput(input, target) -end - diff --git a/contrib/lua-torch/decisiontree/README.md b/contrib/lua-torch/decisiontree/README.md deleted file mode 100644 index db4622add8..0000000000 --- a/contrib/lua-torch/decisiontree/README.md +++ /dev/null @@ -1,386 +0,0 @@ -# Torch decision tree library - -```lua -local dt = require 'decisiontree' -``` - -This project implements random forests and gradient boosted decision trees (GBDT). -The latter are trained with gradient tree boosting. -Both use ensemble learning to produce ensembles of decision trees (that is, forests). - -## `nn.DFD` - -One practical application for decision forests is to *discretize* an input feature space into a richer output feature space.
-The `nn.DFD` Module can be used as a decision forest discretizer (DFD): - -```lua -local dfd = nn.DFD(df, onlyLastNode) -``` - -where `df` is a `dt.DecisionForest` instance or the table returned by the method `getReconstructionInfo()` on another `nn.DFD` module, and `onlyLastNode` is a boolean that indicates that the module should return only the id of the last node visited on each tree (by default it outputs all traversed nodes except for the roots). -The `nn.DFD` module requires dense `input` tensors. -Sparse `input` tensors (tables of tensors) are not supported. -The `output` returned by a call to `updateOutput` is a batch of sparse tensors. -In this `output`, `output[1]` and `output[2]` are respectively a list of key tensors and a list of value tensors: - -```lua -{ - { [torch.LongTensor], ... , [torch.LongTensor] }, - { [torch.Tensor], ... , [torch.Tensor] } -} -``` - -This module doesn't support CUDA. - -### Example -As a concrete example, let us first train a Random Forest on a dummy dense dataset: - -```lua -local nExample = 100 -local batchsize = 2 -local inputsize = 10 - --- train Random Forest -local trainSet = dt.getDenseDummyData(nExample, nil, inputsize) -local opt = { - activeRatio=0.5, - featureBaggingSize=5, - nTree=4, - maxLeafNodes=nExample/2, - minLeafSize=nExample/10, -} -local trainer = dt.RandomForestTrainer(opt) -local df = trainer:train(trainSet, trainSet.featureIds) -assert(#df.trees == opt.nTree) -``` - -Now that we have `df`, a `dt.DecisionForest` instance, we can use it to initialize `nn.DFD`: - -```lua -local dfd = nn.DFD(df) -``` - -The `dfd` instance holds no reference to `df`; instead, it extracts the relevant attributes from `df`. -These attributes are stored in tensors for batching and efficiency. - -We can discretize a hypothetical `input` by calling `forward`: -```lua -local input = trainSet.input:sub(1,batchsize) -local output = dfd:forward(input) -``` - -The resulting output is a table consisting of two tables: keys and values. -The keys and values tables each contain `batchsize` tensors: - -```lua -print(output) -{ - 1 : - { - 1 : LongTensor - size: 14 - 2 : LongTensor - size: 16 - 3 : LongTensor - size: 15 - 4 : LongTensor - size: 13 - } - 2 : - { - 1 : DoubleTensor - size: 14 - 2 : DoubleTensor - size: 16 - 3 : DoubleTensor - size: 15 - 4 : DoubleTensor - size: 13 - } -} -``` - -An example's feature keys (`LongTensor`) and commensurate values (`DoubleTensor`) have the same number of elements. -Each example has a variable number of key-value pairs representing the nodes traversed in the tree. -The output feature space has one dimension (that is, one possible feature key) per node in the forest. - -## `torch.SparseTensor` - -Suppose you have a set of `keys` mapped to `values`: ```lua -local keys = torch.LongTensor{1,3,4,7,2} -local values = torch.Tensor{0.1,0.3,0.4,0.7,0.2} -``` - -You can use a `SparseTensor` to encapsulate these into a read-only tensor: - -```lua -local st = torch.SparseTensor(keys, values) -``` - -The _decisiontree_ library uses `SparseTensors` to simulate the `__index` method of the `torch.Tensor`. -For example, one can obtain the value associated with key 3 of the above `st` instance: - -```lua -local value = st[3] -assert(value == 0.3) -``` - -When a key is missing (key 5 is absent from `keys` above), `nil` is returned instead: - -```lua -local value = st[5] -assert(value == nil) -``` - -The best implementation for this kind of indexing is slow (it uses a sequential scan of `keys`).
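To make that cost concrete, here is a minimal sketch (not part of the library; `nFeature` is an assumed dimensionality) that densifies the `st` defined above by probing every feature id, where each probe scans `keys` sequentially:

```lua
-- a sketch, not library code: densify a SparseTensor by probing every
-- feature id; without an index, each st[f] lookup scans `keys`, so this
-- loop costs O(nFeature * nKeys)
local nFeature = 10 -- assumed dimensionality
local dense = torch.Tensor(nFeature):zero()
for f = 1, nFeature do
   local v = st[f]
   if v then dense[f] = v end
end
```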
-To speed up indexing, one can call the `buildIndex()` method beforehand: - -```lua -st:buildIndex() -``` - -The `buildIndex()` method creates a hash map (a Lua table) from keys to their corresponding indices in the `values` tensor. - -## `dt.DataSet` - -The `CartTrainer`, `RandomForestTrainer` and `GradientBoostTrainer` require that data sets be encapsulated into a `DataSet`. -Suppose you have a dataset of dense inputs and targets: - -```lua -local nExample = 10 -local nFeature = 5 -local input = torch.randn(nExample, nFeature) -local target = torch.Tensor(nExample):random(0,1) -``` - -These can be encapsulated into a `DataSet` object: - -```lua -local dataset = dt.DataSet(input, target) -``` - -Now suppose you have a dataset where the `input` is a table of `SparseTensor` instances: - -```lua -local input = {} -for i=1,nExample do - local nKeyVal = math.random(2,nFeature) - local keys = torch.LongTensor(nKeyVal):random(1,nFeature) - local values = torch.randn(nKeyVal) - input[i] = torch.SparseTensor(keys, values) -end -``` - -You can still use a `DataSet` to encapsulate the sparse dataset: - -```lua -local dataset = dt.DataSet(input, target) -``` - -The main purpose of the `DataSet` class is to sort each feature by value. -This is captured by the `sortFeatureValues(input)` method, which is called in the constructor: - -```lua -local sortedFeatureValues, featureIds = self:sortFeatureValues(input) -``` - -The `featureIds` is a `torch.LongTensor` of all available feature IDs. -For a dense `input` tensor, this is just `torch.LongTensor():range(1,input:size(2))`. -But for a sparse `input` tensor, the `featureIds` tensor only contains the feature IDs present in the dataset. - -The resulting `sortedFeatureValues` is a table mapping `featureIds` to `exampleIds` sorted by `featureValues`. -For each `featureId`, examples are sorted by `featureValue` in ascending order. -For example, the table might look like: `{featureId=exampleIds}` where `exampleIds={1,3,2}`. - -The `CartTrainer` accesses the `sortedFeatureValues` table by calling `getSortedFeature(featureId)`: ```lua -local exampleIdsWithFeature = dataset:getSortedFeature(featureId) -``` - -The ability to access example IDs sorted by feature value, given a feature ID, is the main purpose of the `DataSet`. -The `CartTrainer` relies on these sorted lists to find the best way to split a set of examples between two tree nodes. - -## `dt.CartTrainer` - -```lua -local trainer = dt.CartTrainer(dataset, minLeafSize, maxLeafNodes) -``` - -The `CartTrainer` is used by the `RandomForestTrainer` and `GradientBoostTrainer` to train individual trees. -CART stands for classification and regression trees. -However, only binary classifiers are unit tested. - -The constructor takes the following arguments: - - * `dataset` is a `dt.DataSet` instance representing the training set. - * `minLeafSize` is the minimum number of examples per leaf node in a tree. The larger the value, the more regularization. - * `maxLeafNodes` is the maximum number of leaf nodes in the tree. The lower the value, the more regularization. - -Training is initiated by calling the `train()` method: ```lua -local trainSet = dt.DataSet(input, target) -local rootTreeState = dt.GiniState(trainSet:getExampleIds()) -local activeFeatures = trainSet.featureIds -local tree = trainer:train(rootTreeState, activeFeatures) -``` - -The resulting `tree` is a `CartTree` instance. -The `rootTreeState` is a `TreeState` instance like `GiniState` (used by `RandomForestTrainer`) or `GradientBoostState` (used by `GradientBoostTrainer`).
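Putting these pieces together, a minimal end-to-end run might look like the following sketch (it reuses the `dt.getDenseDummyData` helper from the `nn.DFD` example above; the regularization values are arbitrary):

```lua
local dt = require 'decisiontree'

-- dummy dense dataset: 100 examples, 10 features (helper shown earlier)
local trainSet = dt.getDenseDummyData(100, nil, 10)

-- at least 10 examples per leaf, at most 50 leaf nodes (arbitrary values)
local trainer = dt.CartTrainer(trainSet, 10, 50)

local rootTreeState = dt.GiniState(trainSet:getExampleIds())
local tree = trainer:train(rootTreeState, trainSet.featureIds)

-- score the first training example with the resulting CartTree
local score = tree:score(trainSet.input[1])
```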
-The `activeFeatures` is a `LongTensor` of feature IDs that are used to build the tree. -Every other feature ID is ignored during training. This is useful for feature bagging. - -By default the `CartTrainer` runs in a single thread. -The `featureParallel(nThread)` method can be called before calling `train()` to parallelize training using `nThread` workers: - -```lua -local nThread = 3 -trainer:featureParallel(nThread) -trainer:train(rootTreeState, activeFeatures) -``` - -Feature parallelization assigns a set of feature IDs to each thread. - -The `CartTrainer` can be used as a stand-alone tree trainer. -But it is recommended to use it within the context of a `RandomForestTrainer` or `GradientBoostTrainer` instead. -The latter typically generalize better. - -## RandomForestTrainer - -The `RandomForestTrainer` is used to train a random forest: - -```lua -local nExample = trainSet:size() -local opt = { - activeRatio=0.5, - featureBaggingSize=5, - nTree=14, - maxLeafNodes=nExample/2, - minLeafSize=nExample/10, -} -local trainer = dt.RandomForestTrainer(opt) -local forest = trainer:train(trainSet, trainSet.featureIds) -``` - -The returned `forest` is a `DecisionForest` instance. -A `DecisionForest` has a similar interface to the `CartTree`. -Indeed, they both sub-class the `DecisionTree` abstract class. - -The constructor takes a single `opt` table argument, which contains the actual arguments: - - * `activeRatio` is the ratio of active examples per tree. This is used for bootstrap sampling. - * `featureBaggingSize` is the number of features per tree. This is used for feature bagging. - * `nTree` is the number of trees to be trained. - * `maxLeafNodes` and `minLeafSize` are passed to the underlying `CartTrainer` constructor (controls regularization). - -Internally, the `RandomForestTrainer` passes a `GiniState` to the `CartTrainer:train()` method. - -Training can be parallelized by calling `treeParallel(nThread)`: - -```lua -local nThread = 3 -trainer:treeParallel(nThread) -local forest = trainer:train(trainSet, trainSet.featureIds) -``` - -Training is then parallelized by building each tree in its own worker thread. - -## GradientBoostTrainer - -References: - * A. [Boosted Tree presentation](https://homes.cs.washington.edu/~tqchen/pdf/BoostedTree.pdf) - -Gradient boosted decision trees (GBDT) can be trained as follows: ```lua -local nExample = trainSet:size() -local maxLeafNode, minLeafSize = nExample/2, nExample/10 -local cartTrainer = dt.CartTrainer(trainSet, minLeafSize, maxLeafNode) - -local opt = { - lossFunction=nn.LogitBoostCriterion(false), - treeTrainer=cartTrainer, - shrinkage=0.1, - downsampleRatio=0.8, - featureBaggingSize=-1, - nTree=14, - evalFreq=8, - earlyStop=0 -} - -local trainer = dt.GradientBoostTrainer(opt) -local forest = trainer:train(trainSet, trainSet.featureIds, validSet) -``` - -The above code snippet uses the `LogitBoostCriterion` outlined in reference A. -It is used for training binary classification trees. - -The returned `forest` is a `DecisionForest` instance. -A `DecisionForest` has a similar interface to the `CartTree`. -Indeed, they both sub-class the `DecisionTree` abstract class. - -The constructor takes a single `opt` table argument, which contains the actual arguments: - - * `lossFunction` is an `nn.Criterion` instance extended to include the `updateHessInput(input, target)` and `backward2(input, target)` methods, which expose the Hessian of the loss with respect to the `input`. - * `treeTrainer` is a `CartTrainer` instance.
Its `featureParallel()` method can be called to implement feature parallelization. - * `shrinkage` is the weight of each additional tree. - * `downsampleRatio` is the ratio of examples to be sampled for each tree. Used for bootstrap sampling. - * `featureBaggingSize` is the number of features to sample per tree. Used for feature bagging. `-1` defaults to `torch.round(math.sqrt(featureIds:size(1)))` - * `nTree` is the maximum number of trees. - * `evalFreq` is the number of trees trained between calls to `validate()` for cross-validation and early-stopping. - * `earlyStop` is the maximum number of consecutive validations without improvement before training stops early (non-positive disables early-stopping). - -Internally, the `GradientBoostTrainer` passes a `GradientBoostState` to the `CartTrainer:train()` method. - -## TreeState - -An abstract class that holds the state of a subtree during decision tree training. -It also manages the state of candidate splits. - -```lua -local treeState = dt.TreeState(exampleIds) -``` - -The `exampleIds` argument is a `LongTensor` containing the example IDs that make up the sub-tree. - -## GiniState - -A `TreeState` subclass used internally by the `RandomForestTrainer`. -Uses Gini impurity to determine how to split nodes. - -```lua -local treeState = dt.GiniState(exampleIds) -``` - -The `exampleIds` argument is a `LongTensor` containing the example IDs that make up the sub-tree. - -## GradientBoostState - -A `TreeState` subclass used internally by the `GradientBoostTrainer`. -It implements the GBDT splitting algorithm, which uses a loss function. - -```lua -local treeState = dt.GradientBoostState(exampleIds, gradInput, hessInput) -``` - -The `exampleIds` argument is a `LongTensor` containing the example IDs that make up the sub-tree. -The `gradInput` and `hessInput` tensors hold the gradient and Hessian of the loss with respect to each example's score (see `GradientBoostTrainer`). - - -## WorkPool - -Utility class that simplifies construction of a pool of daemon threads with which to execute tasks in parallel. - -```lua -local workpool = dt.WorkPool(nThread) -``` - -## CartTree - -Implements a trained CART decision tree: - -```lua -local tree = dt.CartTree(rootNode) -``` - -The `rootNode` is a `CartNode` instance. -Each `CartNode` contains pointers to left and right branches, which are themselves `CartNode` instances.
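As an illustration only, a recursive walk over such a tree could look like the sketch below; the `root` field is what `RandomForestTrainer` reads the root score from, but the child field names `leftChild` and `rightChild` are assumptions made for this example, not documented API:

```lua
-- a sketch with assumed field names (`leftChild`/`rightChild`):
-- count the nodes reachable from a CartNode
local function countNodes(node)
   if node == nil then return 0 end
   return 1 + countNodes(node.leftChild) + countNodes(node.rightChild)
end

-- e.g. countNodes(tree.root), where `tree` is a trained CartTree
```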
- -For inference, use the `score(input)` method: - -```lua -local score = tree:score(input) -``` diff --git a/contrib/lua-torch/decisiontree/RandomForestTrainer.lua b/contrib/lua-torch/decisiontree/RandomForestTrainer.lua deleted file mode 100644 index 41040b25b2..0000000000 --- a/contrib/lua-torch/decisiontree/RandomForestTrainer.lua +++ /dev/null @@ -1,159 +0,0 @@ -local dt = require "decisiontree._env" - -local RandomForestTrainer = torch.class("dt.RandomForestTrainer", dt) - -function RandomForestTrainer:__init(opt) - assert(torch.type(opt.nTree) == 'number') - assert(opt.nTree > 0) - self.nTree = opt.nTree - -- max number of leaf nodes per tree - assert(torch.type(opt.maxLeafNodes) == 'number') - assert(opt.maxLeafNodes > 0) - self.maxLeafNodes = opt.maxLeafNodes - -- min number of examples per leaf - assert(torch.type(opt.minLeafSize) == 'number') - assert(opt.minLeafSize > 0) - self.minLeafSize = opt.minLeafSize - - -- when non-positive, defaults to sqrt(#feature) - assert(torch.type(opt.featureBaggingSize) == 'number') - self.featureBaggingSize = opt.featureBaggingSize - - assert(torch.type(opt.activeRatio) == 'number') - assert(opt.activeRatio > 0) - self.activeRatio = opt.activeRatio - - -- default parallelization is singlethread - self.parallelMode = 'singlethread' -end - --- Train a DecisionForest -function RandomForestTrainer:train(trainSet, featureIds, verbose) - assert(torch.isTypeOf(trainSet, 'dt.DataSet')) - assert(torch.type(featureIds) == 'torch.LongTensor') - - if verbose then print(string.format("Begin training Decision Forest with %d trees", self.nTree)) end - - local weight = torch.Tensor(self.nTree):fill(1 / self.nTree) -- RF uses uniform weights - - local trees - if self.parallelMode == 'singlethread' then - trees = self:trainTrees(trainSet, featureIds, verbose) - elseif self.parallelMode == 'treeparallel' then - trainSet:deleteIndex() -- prevents serialization bottleneck - trees = self:trainTreesTP(trainSet, featureIds, verbose) - else - error("Unrecognized parallel mode: " .. 
self.parallelMode) - end - - if verbose then print(string.format("Successfully trained %d trees", #trees)) end - - -- set bias - local bias = 0 - for i, tree in ipairs(trees) do - bias = bias + tree.root.score * weight[i] - end - - return dt.DecisionForest(trees, weight, bias) -end - -function RandomForestTrainer:trainTrees(trainSet, featureIds, verbose) - - -- the same CartTrainer will be used for each tree - local cartTrainer = dt.CartTrainer(trainSet, self.minLeafSize, self.maxLeafNodes) - - local trees = {} - for treeId=1,self.nTree do - -- Train a CartTree - local tree = self.trainTree(cartTrainer, featureIds, self.featureBaggingSize, self.activeRatio, treeId, verbose) - table.insert(trees, tree) - end - return trees -end - --- static function that returns a cartTree -function RandomForestTrainer.trainTree(cartTrainer, featureIds, baggingSize, activeRatio, treeId, verbose) - assert(torch.isTypeOf(cartTrainer, 'dt.CartTrainer')) - assert(torch.type(featureIds) == 'torch.LongTensor') - local baggingSize = baggingSize > 0 and baggingSize or torch.round(math.sqrt(featureIds:size(1))) - - if verbose then - print(string.format("Tree %d: Creating features bootstrap sample with baggingSize %d, nFeatures %d", treeId, baggingSize, featureIds:size(1))) - end - - local trainSet = cartTrainer.dataset - - -- sample bootstrap features - local baggingIndices = torch.LongTensor(baggingSize):random(1,featureIds:size(1)) - local activeFeatures = featureIds:index(1, baggingIndices) - - -- sample bootstrap examples - local sampleSize = torch.round(trainSet:size() * activeRatio) - if verbose then print(string.format("Created bootstrap sample of size %d", sampleSize)) end - - baggingIndices:resize(sampleSize):random(1,trainSet:size()) - local bootStrapExampleIds = torch.LongTensor() - bootStrapExampleIds:index(trainSet:getExampleIds(), 1, baggingIndices) - - local cartTree = cartTrainer:train(dt.GiniState(bootStrapExampleIds), activeFeatures) - - if verbose then print(string.format("Completed processing tree number %d", treeId)) end - - return cartTree -end - --- call before training to enable tree-level parallelization -function RandomForestTrainer:treeParallel(workPool) - assert(self.parallelMode == 'singlethread', self.parallelMode) - self.parallelMode = 'treeparallel' - self.workPool = torch.type(workPool) == 'number' and dt.WorkPool(workPool) or workPool - assert(torch.isTypeOf(self.workPool, 'dt.WorkPool')) - - -- require the dt package - self.workPool:update('require', {libname='decisiontree',varname='dt'}) -end - --- TP is for tree parallel (not toilet paper) -function RandomForestTrainer:trainTreesTP(trainSet, featureIds, verbose) - assert(torch.isTypeOf(trainSet, 'dt.DataSet')) - assert(torch.type(featureIds) == 'torch.LongTensor') - local minLeafSize = self.minLeafSize - local maxLeafNodes = self.maxLeafNodes - - -- setup worker store (each worker will have its own cartTrainer) - self.workPool:updateup('execute', function(store) - local dt = require 'decisiontree' - - store.cartTrainer = dt.CartTrainer(trainSet, minLeafSize, maxLeafNodes) - store.featureIds = featureIds - end) - - for treeId=1,self.nTree do - -- upvalues - local baggingSize = self.featureBaggingSize - local activeRatio = self.activeRatio - -- task closure that will be executed in worker-thread - local function trainTreeTask(store) - local dt = require 'decisiontree' - return dt.RandomForestTrainer.trainTree(store.cartTrainer, store.featureIds, baggingSize, activeRatio, treeId, verbose) - end -
self.workPool:writeup('execute', trainTreeTask) - end - - local trees = {} - for treeId=1,self.nTree do - local taskname, tree = self.workPool:read() - assert(taskname=='execute') - assert(torch.isTypeOf(tree, 'dt.CartTree')) - table.insert(trees, tree) - end - return trees -end - -function RandomForestTrainer:getName() - return string.format( - "randomforest-aRatio-%4.2f-maxLeaf-%d-minExample-%d-nTree-%d", - self.activeRatio, self.maxLeafNodes, self.minLeafSize, self.nTree - ) -end - diff --git a/contrib/lua-torch/decisiontree/Sparse2Dense.lua b/contrib/lua-torch/decisiontree/Sparse2Dense.lua deleted file mode 100644 index 4e5b79d2fc..0000000000 --- a/contrib/lua-torch/decisiontree/Sparse2Dense.lua +++ /dev/null @@ -1,88 +0,0 @@ -local S2D, parent = torch.class("nn.Sparse2Dense", "nn.Module") -local dt = require 'decisiontree._env' - -function S2D:__init(features) - parent.__init(self) - if torch.type(features) == 'table' then - assert(#features > 0) - features = torch.LongTensor(features) - end - assert(torch.isTensor(features)) - self.features = features - self.featureMap = nil - self.masks = {} - self.mappedKeys = {} -end - -function S2D:updateOutput(input) - if not self.featureMap then - self.featureMap = dt.HashMap() - self.featureMap:fill(self.features) - end - local batched, keys, values - if torch.isTensor(input[1]) then - keys = {input[1]} - values = {input[2]} - batched = false - else - keys = input[1] - values = input[2] - batched = true - end - assert(#keys == #values) - - local masks = self.masks - local mappedKeys = self.mappedKeys - local nKeys = #keys - local nMasks = #masks - if nMasks < nKeys then - for i=nMasks+1,nKeys do - masks[i] = torch.ByteTensor() - mappedKeys[i] = torch.LongTensor() - end - elseif nMasks > nKeys then - for i=nKeys+1,nMasks do - masks[i] = nil - mappedKeys[i] = nil - end - end - - self.featureMap:get(keys, mappedKeys, masks) - self.output = self.output or torch.Tensor():type(self._type) - self.output.nn.S2D_computeOutput(self.output, mappedKeys, values, masks, self.features) - if not batched then - self.output = self.output:view(-1) - end - return self.output -end - -function S2D:type(type, tensorCache) - if type then - local features = self.features - self.features = nil - parent.type(self, type, tensorCache) - self.features = features - return self - else - return parent.type(self) - end -end - -function S2D:updateGradInput(input, gradOutput) - error"Not Implemented" -end - -function S2D:reset() - parent.reset(self) - self.featureMap = nil -end - -function S2D:write(file) - self.featureMap = nil - parent.write(self, file) -end - -function S2D:read(file) - self.featureMap = nil - parent.read(self, file) -end diff --git a/contrib/lua-torch/decisiontree/SparseTensor.lua b/contrib/lua-torch/decisiontree/SparseTensor.lua deleted file mode 100644 index 4c620e618c..0000000000 --- a/contrib/lua-torch/decisiontree/SparseTensor.lua +++ /dev/null @@ -1,54 +0,0 @@ - -local SparseTensor = torch.class("torch.SparseTensor") - -function SparseTensor:__init(keys, values) - if keys and values then - assert(torch.typename(keys):find('torch%..*LongTensor')) - assert(torch.isTensor(values)) - assert(keys:nElement() == values:nElement(), "Expecting key and value tensors of same size") - self.keys = keys - self.values = values - elseif not (keys or values) then - self.keys = torch.LongTensor() - self.values = torch.Tensor() - else - error"Expecting zero or two args" - end -end - -function SparseTensor:buildIndex(overwrite) - if self._map and not overwrite then return 
end - assert(self.keys and self.keys:dim() == 1) - assert(self.values and self.values:dim() == 1) - -- hash table - self._map = {} - for i=1,self.keys:size(1) do - self._map[self.keys[i]] = i - end -end - -function SparseTensor:deleteIndex() - self._map = nil -end - -local __index = SparseTensor.__index -function SparseTensor:__index(key) - if key == nil then - error"Attempt to index using a nil key" - elseif torch.type(key) ~= 'number' then - return __index(self, key) - end - - if self._map then - assert(torch.type(self._map) == 'table') - local idx = self._map[key] - return idx and self.values[idx] or nil - elseif self.keys:nElement() > 0 then - for i=1,self.keys:size(1) do - if self.keys[i] == key then - return self.values[i] - end - end - end - return nil -end \ No newline at end of file diff --git a/contrib/lua-torch/decisiontree/TreeState.lua b/contrib/lua-torch/decisiontree/TreeState.lua deleted file mode 100644 index 3928649fd0..0000000000 --- a/contrib/lua-torch/decisiontree/TreeState.lua +++ /dev/null @@ -1,191 +0,0 @@ -local dt = require "decisiontree._env" - -local TreeState = torch.class("dt.TreeState", dt) - --- Holds the state of a subtree during decision tree training. --- Also, manages the state of candidate splits -function TreeState:__init(exampleIds) - assert(torch.type(exampleIds) == 'torch.LongTensor') - self.exampleIds = exampleIds - - self.nExampleInLeftBranch = 0 - self.nExampleInRightBranch = 0 -end - --- computes and returns the score of the node based on its examples -function TreeState:score(dataset) - error"NotImplemented" -end - - --- Initializes the split-state-updater. Initially all examples are in the left branch. --- exampleIdsWithFeature is list of examples to split (those having a particular feature) -function TreeState:initialize(exampleIdsWithFeature, dataset) - error"NotImplemented" -end - --- Update the split state. This call has the effect of shifting the example from the left to the right branch. 
-function TreeState:update(exampleId, dataset) - error"NotImplemented" -end - --- Computes the SplitInfo determined by the current split state --- @param splitFeatureId the feature id of the split feature --- @param splitFeatureValue the feature value of the split feature --- @return the SplitInfo determined by the current split state -function TreeState:computeSplitInfo(splitFeatureId, splitFeatureValue) - error"NotImplemented" -end - --- bottleneck -function TreeState:findBestFeatureSplit(dataset, featureId, minLeafSize) - local dt = require "decisiontree" - assert(torch.isTypeOf(dataset, 'dt.DataSet')) - assert(torch.type(featureId) == 'number') - assert(torch.type(minLeafSize) == 'number') - - -- all dataset example having this feature, sorted by value - local featureExampleIds = dataset:getSortedFeature(featureId) - - local buffer = dt.getBufferTable('TreeState') - buffer.longtensor = buffer.longtensor or torch.LongTensor() - local exampleIdsWithFeature = buffer.longtensor - - -- map and tensor of examples containing feature: - local exampleMap = {} - local getExampleFeatureValue - - local j = 0 - if torch.type(dataset.input) == 'table' then - exampleIdsWithFeature:resize(self.exampleIds:size()) - self.exampleIds:apply(function(exampleId) - local input = dataset.input[exampleId] - input:buildIndex()-- only builds index first time - if input[featureId] then - j = j + 1 - exampleIdsWithFeature[j] = exampleId - exampleMap[exampleId] = j - end - end) - if j == 0 then - return - end - exampleIdsWithFeature:resize(j) - getExampleFeatureValue = function(exampleId) return dataset.input[exampleId][featureId] end - else - exampleIdsWithFeature = self.exampleIds - self.exampleIds:apply(function(exampleId) - j = j + 1 - exampleMap[exampleId] = j - end) - local featureValues = dataset.input:select(2,featureId) - getExampleFeatureValue = function(exampleId) return featureValues[exampleId] end - end - - - self:initialize(exampleIdsWithFeature, dataset) - - -- bottleneck - local bestSplit, previousSplitValue, _tictoc - for i=featureExampleIds:size(1),1,-1 do -- loop over examples sorted (desc) by feature value - local exampleId = featureExampleIds[i] - - local exampleIdx = exampleMap[exampleId] - if exampleIdx then - local splitValue = getExampleFeatureValue(exampleId) - - if previousSplitValue and math.abs(splitValue - previousSplitValue) > dt.EPSILON then - local splitInfo = self:computeSplitInfo(featureId, previousSplitValue, _tictoc) - if (splitInfo.leftChildSize >= minLeafSize) and (splitInfo.rightChildSize >= minLeafSize) then - - if (not bestSplit) or (splitInfo.splitGain < bestSplit.splitGain) then - _tictoc = bestSplit or {} -- reuse table - bestSplit = splitInfo - end - - end - end - - previousSplitValue = splitValue - - -- bottleneck - self:update(exampleId, dataset, exampleIdx) - end - end - - return bestSplit -end - --- finds the best split of examples in treeState among featureIds -function TreeState:findBestSplit(dataset, featureIds, minLeafSize, shardId, nShard) - assert(torch.isTypeOf(dataset, 'dt.DataSet')) - assert(torch.type(featureIds) == 'torch.LongTensor') - assert(torch.type(minLeafSize) == 'number') - assert(torch.type(shardId) == 'number') - assert(torch.type(nShard) == 'number') - - local bestSplit - for i=1,featureIds:size(1) do - local featureId = featureIds[i] - if (nShard <= 1) or ( (featureId % nShard) + 1 == shardId ) then -- feature sharded - local splitCandidate = self:findBestFeatureSplit(dataset, featureId, minLeafSize) - if splitCandidate and ((not bestSplit) or 
(splitCandidate.splitGain < bestSplit.splitGain)) then - bestSplit = splitCandidate - end - end - end - - return bestSplit -end - --- Partitions self given a splitInfo table, producing a pair of exampleIds corresponding to the left and right subtrees. -function TreeState:_branch(splitInfo, dataset) - local leftIdx, rightIdx = 0, 0 - local nExample = self.exampleIds:size(1) - local splitExampleIds = torch.LongTensor(nExample) - - - for i=1,self.exampleIds:size(1) do - local exampleId = self.exampleIds[i] - local input = dataset.input[exampleId] - local val = input[splitInfo.splitId] - -- Note: when the feature is not present in the example, the example is droped from all sub-trees. - -- Which means that for most sparse data, a tree cannot reach 100% accuracy... - if val then - if val < splitInfo.splitValue then - leftIdx = leftIdx + 1 - splitExampleIds[leftIdx] = exampleId - else - rightIdx = rightIdx + 1 - splitExampleIds[nExample-rightIdx+1] = exampleId - end - end - end - - local leftExampleIds = splitExampleIds:narrow(1,1,leftIdx) - local rightExampleIds = splitExampleIds:narrow(1,nExample-rightIdx+1,rightIdx) - - assert(leftExampleIds:size(1) + rightExampleIds:size(1) <= self.exampleIds:size(1), "Left and right branches contain more data than the parent!") - return leftExampleIds, rightExampleIds -end - --- calls _branch and encapsulates the left and right exampleIds into a TreeStates -function TreeState:branch(splitInfo, dataset) - local leftExampleIds, rightExampleIds = self:_branch(splitInfo, dataset) - return self.new(leftExampleIds), self.new(rightExampleIds) -end - -function TreeState:size() - return self.exampleIds:size(1) -end - -function TreeState:contains(exampleId) - local found = false - self.exampleIds:apply(function(x) - if x == exampleId then - found = true - end - end) - return found -end - diff --git a/contrib/lua-torch/decisiontree/WorkPool.lua b/contrib/lua-torch/decisiontree/WorkPool.lua deleted file mode 100644 index 8f473727e0..0000000000 --- a/contrib/lua-torch/decisiontree/WorkPool.lua +++ /dev/null @@ -1,156 +0,0 @@ -local dt = require "decisiontree._env" - --- Utility to simplify construction of a pool of daemon threads with which to execute tasks in parallel. -local WorkPool = torch.class("dt.WorkPool", dt) - -function WorkPool:__init(nThread) - self.nThread = nThread or 16 - assert(torch.type(self.nThread) == 'number') - assert(self.nThread > 0) - - self:initialize() -end - -function WorkPool:initialize() - local ipc = require 'libipc' - self.queuename = os.tmpname() - self.queue = ipc.workqueue(self.queuename) - self.queues = {} - for i=1,self.nThread do - self.queues[i] = ipc.workqueue(self.queuename.."/"..i) - end - - -- spawn thread workers - ipc.map(self.nThread, function(queuename, nThread, myId) - assert(nThread) - assert(myId) - local ipc = require 'libipc' - - -- Open the queue by name (the main thread already created it) - local mainqueue = ipc.workqueue(queuename) - local workqueue = ipc.workqueue(queuename.."/"..myId) - - local taskname, args - - local store = {} - local queue = mainqueue - - repeat - local msg = queue:read() - assert(torch.type(msg) == 'table') - taskname, task = unpack(msg) - if taskname == nil then - break - elseif torch.type(taskname) ~= 'string' then - error("Expecting taskname string. 
Got "..torch.type(taskname)) - elseif taskname == 'storeKeyValue' then - assert(torch.type(task) == 'table') - assert(queue == workqueue) - store[task.key] = task.value - queue:write({taskname}) - elseif taskname == 'storeKeysValues' then - assert(torch.type(task) == 'table') - assert(queue == workqueue) - for key,value in pairs(task) do - store[key] = value - end - queue:write({taskname}) - elseif taskname == 'require' then - assert(torch.type(task) == 'table') - assert(torch.type(task.libname) == 'string') - assert(torch.type(task.varname) == 'string') - _G[task.varname] = require(task.libname) - assert(queue == workqueue) - queue:write({taskname}) - elseif taskname == 'storeReset' then - store = {} - mainqueue:write({taskname}) - elseif taskname == 'echo' then - mainqueue:write({taskname, task}) - elseif taskname == 'readWorkerQueue' then - queue = workqueue - elseif taskname == 'readMainQueue' then - queue = mainqueue - elseif taskname == 'execute' then - if torch.type(task) == 'table' then - assert(task.func and task.args) - queue:write({taskname, task.func(store, task.args, myId)}) - else - assert(torch.type(task) == 'function') - queue:write({taskname, task(store, myId)}) - end - else - error("Unknown taskname: "..taskname) - end - until taskname == nil - end, self.queuename, self.nThread) - -end - --- Terminates all daemon threads. -function WorkPool:terminate() - for i=1,self.nThread do - self.queue:write({}) - end -end - --- this function is used to update the store of data in each worker thread -function WorkPool:_update(taskname, task, upval) - assert(torch.type(taskname) == 'string') - local _ = require 'moses' - assert(_.contains({'storeKeyValue','storeKeysValues','require','execute'}, taskname)) - assert(torch.type(task) == 'table' or torch.type(task) == 'function') - - -- tell the workers to read their individual queue - for i=1,self.nThread do - self.queue:write({'readWorkerQueue'}) - end - - -- write to individual worker queues - for i=1,self.nThread do - if upval then - self.queues[i]:writeup({taskname, task}) - else - self.queues[i]:write({taskname, task}) - end - end - - -- TODO use ipc.mutex:barrier(nThread+1) - -- barrier: make sure that every worker has completed task by reading their queue - for i=1,self.nThread do - assert(self.queues[i]:read()[1] == taskname) - end - - -- finally, tell them to read the main queue - for i=1,self.nThread do - self.queues[i]:write({'readMainQueue'}) - end -end - -function WorkPool:update(taskname, task) - return self:_update(taskname, task, false) -end - -function WorkPool:updateup(taskname, task) - return self:_update(taskname, task, true) -end - -function WorkPool:write(taskname, task) - assert(torch.type(taskname) == 'string') - assert(taskname ~= 'storeKeyValue' or taskname ~= 'storeKeysValues') - self.queue:write({taskname, task}) -end - -function WorkPool:writeup(taskname, task) - assert(torch.type(taskname) == 'string') - assert(taskname ~= 'storeKeyValue' or taskname ~= 'storeKeysValues') - self.queue:writeup({taskname, task}) -end - -function WorkPool:read() - local res = self.queue:read() - assert(torch.type(res) == 'table') - assert(torch.type(res[1] == 'string')) - return unpack(res) -end - diff --git a/contrib/lua-torch/decisiontree/_env.lua b/contrib/lua-torch/decisiontree/_env.lua deleted file mode 100644 index a927701522..0000000000 --- a/contrib/lua-torch/decisiontree/_env.lua +++ /dev/null @@ -1,5 +0,0 @@ - --- https://github.com/torch/torch7/issues/525 - -local dl = {} -return dl \ No newline at end of file diff 
--git a/contrib/lua-torch/decisiontree/benchmark.lua b/contrib/lua-torch/decisiontree/benchmark.lua deleted file mode 100644 index 2b6a03dc6a..0000000000 --- a/contrib/lua-torch/decisiontree/benchmark.lua +++ /dev/null @@ -1,171 +0,0 @@ -local dt = require "decisiontree._env" - -local bm = {} -function bm.CartTrainer(opt) - local timer = torch.Timer() - local trainSet, validSet = dt.getSparseDummyData(opt) - print(string.format("CartTrainer: sparse dataset create: %f samples/sec; %f sec", opt.nExample/timer:time().real, timer:time().real)) - - local cartTrainer = dt.CartTrainer(trainSet, opt.minLeafSize, opt.maxLeafNodes) - local treeState = dt.GiniState(trainSet:getExampleIds()) - timer:reset() - local cartTree, nleaf = cartTrainer:train(treeState, trainSet.featureIds) - print(string.format("CartTrainer: train single-thread : %f samples/sec; %f sec", opt.nExample/timer:time().real, timer:time().real)) - - timer:reset() - cartTrainer:featureParallel(opt.nThread) - print(string.format("CartTrainer: setup feature-parallel : %f samples/sec; %f sec", opt.nExample/timer:time().real, timer:time().real)) - timer:reset() - local cartTree, nleaf = cartTrainer:train(treeState, trainSet.featureIds) - print(string.format("CartTrainer: train feature-parallel : %f samples/sec; %f sec", opt.nExample/timer:time().real, timer:time().real)) -end - -function bm.GradientBoostState(opt) - local trainSet, validSet = dt.getSparseDummyData(opt) - - trainSet:initScore() - - local treeState = dt.GradientBoostState(trainSet:getExampleIds(), nn.LogitBoostCriterion(false)) - - local timer = torch.Timer() -- first step also calls SparseTensor:buildIndex() - treeState:findBestSplit(trainSet, trainSet.featureIds, 10, 1, 3) - print(string.format("GradientBoostState: findBestSplit (first) : %f sec", timer:time().real)) - - timer:reset() - treeState:findBestSplit(trainSet, trainSet.featureIds, 10, 1, 3) - print(string.format("GradientBoostState: findBestSplit (second) : %f sec", timer:time().real)) - -end - -local function file_exists(name) - local f=io.open(name,"r") - if f~=nil then io.close(f) return true else return false end -end - -function bm.GradientBoostTrainer(opt) - local trainSet, validSet - if file_exists("/tmp/train.bin") and file_exists("/tmp/valid.bin") then - trainSet = torch.load("/tmp/train.bin") - validSet = torch.load("/tmp/valid.bin") - else - if opt.sparse then - trainSet, validSet = dt.getSparseDummyData(opt) - else - trainSet, validSet = dt.getDenseDummyData(opt) - end - torch.save("/tmp/train.bin", trainSet) - torch.save("/tmp/valid.bin", validSet) - end - - local cartTrainer = dt.CartTrainer(trainSet, opt.minLeafSize, opt.maxLeafNodes) - opt.lossFunction = nn.LogitBoostCriterion(false) - opt.treeTrainer = cartTrainer - local forestTrainer = dt.GradientBoostTrainer(opt) - - local timer = torch.Timer() - local decisionForest = forestTrainer:train(trainSet, trainSet.featureIds, validSet) - local time = timer:time().real - print(string.format("GradientBoostTrainer: train single-thread : %f samples/sec; %f sec/tree, %f sec", opt.nExample/time, time/opt.nTree, time)) - - cartTrainer:featureParallel(opt.nThread) - timer:reset() - local decisionForest = forestTrainer:train(trainSet, trainSet.featureIds, validSet) - local time = timer:time().real - print(string.format("GradientBoostTrainer: train feature-parallel : %f samples/sec; %f sec/tree, %f sec", opt.nExample/time, time/opt.nTree, time)) -end - -function bm.RandomForestTrainer(opt) - local trainSet, validSet = dt.getSparseDummyData(opt) - - local 
forestTrainer = dt.RandomForestTrainer(opt) - local decisionForest = forestTrainer:train(trainSet, trainSet.featureIds) - - local timer = torch.Timer() - local decisionForest = forestTrainer:train(trainSet, trainSet.featureIds) - local time = timer:time().real - print(string.format("RandomForestTrainer: train single-thread : %f samples/sec; %f sec/tree, %f sec", opt.nExample/time, time/opt.nTree, time)) - - timer:reset() - forestTrainer:treeParallel(opt.nThread) - print(string.format("RandomForestTrainer: setup tree-parallel : %f samples/sec; %f sec", opt.nExample/timer:time().real, timer:time().real)) - - timer:reset() - local decisionForest = forestTrainer:train(trainSet, trainSet.featureIds) - local time = timer:time().real - print(string.format("RandomForestTrainer: train tree-parallel : %f samples/sec; %f sec/tree, %f sec", opt.nExample/time, time/opt.nTree, time)) -end - -function bm.DFD(opt) - local _ = require 'moses' - local opt = _.clone(opt) - opt.nExample = 200 - local trainSet, validSet = dt.getDenseDummyData(opt) - - local forestTrainer = dt.RandomForestTrainer(opt) - forestTrainer:treeParallel(opt.nThread) - local timer = torch.Timer() - local decisionForest = forestTrainer:train(trainSet, trainSet.featureIds) - local time = timer:time().real - print(string.format("DFD: train random forest in parallel : %f samples/sec; %f sec/tree, %f sec", opt.nExample/time, time/opt.nTree, time)) - - - -- benchmark nn.DFD - local input = trainSet.input:sub(1,opt.batchsize) - local dfd = nn.DFD(decisionForest) - dfd:forward(input) - timer:reset() - for i=1,opt.nloop do - dfd:forward(input) - end - print(string.format("DFD: updateOutput : %f samples/sec; %f sec", opt.nloop*opt.batchsize/timer:time().real, timer:time().real)) -end - -function bm.Sparse2Dense(opt) - local _ = require 'moses' - local opt = _.clone(opt) - opt.nExample = opt.batchsize - local trainSet = dt.getSparseDummyData(opt) - - local input = {{},{}} - for i=1,opt.batchsize do - input[1][i] = trainSet.input[i].keys - input[2][i] = trainSet.input[i].values - end - assert(#input[1] == opt.batchsize) - - -- benchmark nn.Sparse2Dense - local s2d = nn.Sparse2Dense(torch.LongTensor():range(1,opt.nFeature)) - s2d:forward(input) - local timer = torch.Timer() - for i=1,opt.nloop do - s2d:forward(input) - end - print(string.format("Sparse2Dense: updateOutput : %f samples/sec; %f sec", opt.nloop*opt.batchsize/timer:time().real, timer:time().real)) -end - -function dt.benchmark(benchmarks, opt2) - local opt = { - nExample=10000, nCluster=2, nFeature=1000, overlap=0, nValid=100, -- getSparseDummyData - nTree=20, featureBaggingSize=-1, sparse=true, -- GradientBoostTrainer and RandomForestTrainer - nThread=2, shrinkage=0.1, downsampleRatio=0.1, evalFreq=5, earlyStop=0, -- GradientBoostTrainer - activeRatio=0.5, -- RandomForestTrainer - batchsize=32, nloop=10 - } - - local _ = require 'moses' - benchmarks = benchmarks or _.keys(bm) - assert(torch.type(benchmarks) == 'table') - for i,benchmark in ipairs(benchmarks) do - local opt1 = _.clone(opt) - for key, value in pairs(opt2 or {}) do - opt1[key] = value - end - opt1.nActive = opt1.nActive or torch.round(opt1.nFeature/10) - opt1.maxLeafNodes = opt1.maxLeafNodes or (opt1.nExample/10) - opt1.minLeafSize = opt1.minLeafSize or (opt1.nExample/100) - - assert(torch.type(benchmark) == 'string', benchmark) - assert(bm[benchmark], benchmark) - bm[benchmark](opt1) - end -end diff --git a/contrib/lua-torch/decisiontree/doc/benchmark.md b/contrib/lua-torch/decisiontree/doc/benchmark.md deleted file 
mode 100644
index cb8f905d67..0000000000
--- a/contrib/lua-torch/decisiontree/doc/benchmark.md
+++ /dev/null
@@ -1,291 +0,0 @@
-# Benchmarks
-
-This file outlines the roadmap (and commensurate benchmarks) of optimizations and refactorings over time.
-
-## Baseline
-
-The baseline implementation is very slow.
-We converted the Twitter decision tree library (used internally) from Java to Lua.
-The objective was to replicate the GBDT and Random Forest implementations as is (more or less).
-The Java library is very good and reasonably fast. The same code in Lua is slow.
-The point of this Lua baseline was not to obtain the same computational performance as the Java library.
-Instead, we wanted the training and inference algorithms of the Lua lib to match those of the Java lib.
-As such, the training/validation error of the baseline Lua lib should match that of the Java lib.
-The unit tests seem to validate this claim as both training/validation set performance is unit tested.
-We also used the conversion exercise as a way to learn about decision tree implementation (our background is deep learning).
-That being said, the baseline performance is terrible:
-
-```
-th -e "dt = require 'decisiontree'; dt.benchmark()"
-CartTrainer: sparse dataset create: 2963.192386 samples/sec; 0.337479 sec
-CartTrainer: train single-thread : 14.165438 samples/sec; 70.594361 sec
-CartTrainer: setup feature-parallel : 5.129034 samples/sec; 194.968478 sec
-CartTrainer: train feature-parallel : 9.736592 samples/sec; 102.705344 sec
-```
-
-The original Java lib had approximately 43 classes.
-The baseline has about 24.
-This reduction is due to obvious merging of classes, but also to conversions of classes to functions.
-The next patches continue this process of reducing the number of classes.
-
-## Patch 1 (complete):
-
-This patch further reduces the number of classes, but adds the DataSet class.
-The code is much simpler to read. Examples are batched.
-
- * [x] examples are batched in dt.DataSet: {input, target, score}
- * [x] deprecate dt.LabeledExample
- * [x] lists of examples are replaced with torch.LongTensors of exampleIds
- * [x] merge TreeBrancher into TreeState
- * [x] merge BestSplitFinder and SplitStateUpdater into TreeState
- * [x] TreeState subclasses: GradientBoostState and GiniState
-
-```
-th -e "dt = require 'decisiontree'; dt.benchmark()"
-CartTrainer: sparse dataset create: 3597.392294 samples/sec; 0.277984 sec
-CartTrainer: train single-thread : 35.763255 samples/sec; 27.961663 sec
-CartTrainer: setup feature-parallel : 36759.250495 samples/sec; 0.027220 sec
-CartTrainer: train feature-parallel : 72.523658 samples/sec; 13.788606 sec
-```
-
-The setup time for feature-parallelization is most improved.
-The run-time for feature-parallel is also about half that of single-thread.
-Since it is using 2 threads, that means the parallelization is working quite well.
-
-We also added benchmarks for the `RandomForestTrainer` and `GradientBoostTrainer`:
-
-```
-GradientBoostTrainer: train single-thread : 599.895105 samples/sec; 0.083348 sec/tree, 1.666958 sec
-GradientBoostTrainer: train feature-parallel : 974.235273 samples/sec; 0.051322 sec/tree, 1.026446 sec
-RandomForestTrainer: train single-thread : 134.781044 samples/sec; 0.370972 sec/tree, 7.419441 sec
-RandomForestTrainer: setup tree-parallel : 73341.097064 samples/sec; 0.013649 sec
-RandomForestTrainer: train tree-parallel : 262.975891 samples/sec; 0.190131 sec/tree, 3.802630 sec
-```
-
-Looks good.
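To make the new batched layout concrete, here is a minimal sketch in Lua. The `{input, target, score}` batching and the `torch.LongTensor` example ids come from the checklist above; the exact `dt.DataSet` constructor signature is an assumption made for illustration.

```
-- minimal sketch of the Patch 1 layout; the DataSet constructor
-- signature is assumed here, not taken from the actual API
local dt = require 'decisiontree'

local input = torch.Tensor(100, 10):uniform()   -- 100 examples, 10 dense features
local target = torch.Tensor(100):random(0, 1)   -- binary targets

-- examples are batched into one dt.DataSet instead of a list of dt.LabeledExample
local dataset = dt.DataSet(input, target)

-- tree nodes now pass around torch.LongTensors of example ids, not lists of examples
local exampleIds = dataset:getExampleIds()      -- e.g. a torch.LongTensor of 1..100
```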
-
-## Patch 2 (complete):
-
- * [x] dt.LossFunction -> nn.Criterion (LogitBoost is done, missing MSE)
- * [x] use SparseTensor:buildIndex() to accelerate TreeState:findBestSplit()
- * [x] benchmarks use 10000 instead of 1000 examples
-
-The benchmarks indicate good improvements. Most improvements were made possible by the use of `buildIndex`:
-
-```
-th -e "dt = require 'decisiontree'; dt.benchmark()"
-GradientBoostState: findBestSplit (first) : 11.415645 sec
-GradientBoostState: findBestSplit (second) : 11.246336 sec
-CartTrainer: sparse dataset create: 3284.803629 samples/sec; 3.044327 sec
-CartTrainer: train single-thread : 239.544758 samples/sec; 41.745858 sec
-CartTrainer: setup feature-parallel : 10996.443063 samples/sec; 0.909390 sec
-CartTrainer: train feature-parallel : 473.888592 samples/sec; 21.102011 sec
-RandomForestTrainer: train single-thread : 892.985186 samples/sec; 0.559920 sec/tree, 11.198394 sec
-RandomForestTrainer: setup tree-parallel : 176806.252266 samples/sec; 0.056569 sec
-RandomForestTrainer: train tree-parallel : 1377.849291 samples/sec; 0.362884 sec/tree, 7.257688 sec
-GradientBoostTrainer: train single-thread : 2685.485128 samples/sec; 0.186186 sec/tree, 3.723722 sec
-GradientBoostTrainer: train feature-parallel : 3712.313215 samples/sec; 0.134687 sec/tree, 2.693738 sec
-```
-
-The main bottleneck now is in serializing the SparseTensor hash maps. We temporarily overcame this bottleneck by
-deleting indexes when calling `CartTrainer:featureParallel()` and `RandomForestTrainer:treeParallel()`.
-In this way, the indexes are recreated for each thread. Ideally, we would use a C hash map such that a pointer
-could be serialized instead. But `tds.Hash` does not serialize well. For now, we use Lua tables instead.
-
-This is the benchmark for `GradientBoostTrainer` on a large dataset of dense inputs:
-
-```
-th -e "dt = require 'decisiontree'; dt.benchmark({'GradientBoostTrainer'}, {nExample=100000, sparse=false, nFeature=836, nTree=5, downsampleRatio=1, minLeafSize=1000, maxLeafNodes=8})"
-GradientBoostTrainer: train single-thread : 152.463989 samples/sec; 131.178517 sec/tree, 655.892584 sec
-GradientBoostTrainer: train feature-parallel : 224.288488 samples/sec; 89.170872 sec/tree, 445.854358 sec
-[tw-mbp-nleonard decisiontree]$ th -e "dt = require 'decisiontree'; dt.benchmark({'GradientBoostTrainer'}, {nExample=100000, sparse=false, nFeature=836, nTree=5, downsampleRatio=1, minLeafSize=1000, maxLeafNodes=8,nThread=4})"
-GradientBoostTrainer: train single-thread : 163.836896 samples/sec; 122.072625 sec/tree, 610.363126 sec
-GradientBoostTrainer: train feature-parallel : 407.981442 samples/sec; 49.021838 sec/tree, 245.109188 sec
-```
-
-## Patch 3 :
-
-Optimize GBDT for large datasets consisting of dense inputs.
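The heart of this patch is a dense fast path in the split search: when the input is a plain 2D tensor, feature values are read by direct indexing instead of per-example lookups, and the hot loop avoids `assert`. A minimal sketch of the idea follows; the names and signature are illustrative, not the real `TreeState.findBestFeatureSplit` API. The benchmarks come after.

```
-- illustrative sketch only, not the actual implementation
local function bestSplitForFeature(dataset, featureId, minLeafSize)
   if torch.isTensor(dataset.input) then
      -- dense fast path: one contiguous column read, no hash lookups
      -- and no assert() in the inner loop
      local featureValues = dataset.input:select(2, featureId)
      -- ... scan featureValues for the best split point ...
   else
      -- sparse inputs keep the generic (slower) SparseTensor path
   end
end
```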
The benchmarks:
-
-```
-th -e "dt = require 'decisiontree'; dt.benchmark({'GradientBoostTrainer'}, {nExample=100000, sparse=false, nFeature=836, nTree=5, downsampleRatio=1, minLeafSize=1000, maxLeafNodes=8})"
-GradientBoostTrainer: train single-thread : 547.553407 samples/sec; 36.526117 sec/tree, 182.630587 sec
-GradientBoostTrainer: train feature-parallel : 792.964678 samples/sec; 25.221804 sec/tree, 126.109022 sec
-[tw-mbp-nleonard decisiontree]$ th -e "dt = require 'decisiontree'; dt.benchmark({'GradientBoostTrainer'}, {nExample=100000, sparse=false, nFeature=836, nTree=5, downsampleRatio=1, minLeafSize=1000, maxLeafNodes=8,nThread=4})"
-GradientBoostTrainer: train single-thread : 555.793759 samples/sec; 35.984571 sec/tree, 179.922855 sec
-GradientBoostTrainer: train feature-parallel : 1289.977846 samples/sec; 15.504142 sec/tree, 77.520711 sec
-```
-
-For 1, 2 and 4 threads, the speedups of patch 3 over patch 2 are respectively: 3.39, 3.53, and 3.18.
-For this patch, the multi-threading speedups of 2 and 4 threads over a single thread are respectively: 1.42 and 2.33.
-Improvements over the previous patch were obtained by optimizing two aspects:
-
- 1. Optimizing `TreeState.findBestFeatureSplit` for dense datasets (for example: `if dense, then ...`);
- 2. Removing `assert` clauses in `GradientBoostState.update`. The `update` method is called for every (example, feature) pair, making it a major bottleneck.
-
-Converting the `update` method to C could lead to further optimizations.
-
-This patch also improves the benchmark on sparse datasets:
-```
-$ th -e "dt = require 'decisiontree'; dt.benchmark()"
-RandomForestTrainer: train single-thread : 1121.311196 samples/sec; 0.445907 sec/tree, 8.918131 sec
-RandomForestTrainer: setup tree-parallel : 168773.323354 samples/sec; 0.059256 sec
-RandomForestTrainer: train tree-parallel : 1701.280938 samples/sec; 0.293896 sec/tree, 5.877924 sec
-GradientBoostState: findBestSplit (first) : 8.250646 sec
-GradientBoostState: findBestSplit (second) : 7.952077 sec
-GradientBoostTrainer: train single-thread : 3355.248596 samples/sec; 0.149020 sec/tree, 2.980405 sec
-GradientBoostTrainer: train feature-parallel : 4399.133369 samples/sec; 0.113659 sec/tree, 2.273175 sec
-CartTrainer: sparse dataset create: 3428.105601 samples/sec; 2.917069 sec
-CartTrainer: train single-thread : 282.172416 samples/sec; 35.439331 sec
-CartTrainer: setup feature-parallel : 9455.440801 samples/sec; 1.057598 sec
-CartTrainer: train feature-parallel : 594.054049 samples/sec; 16.833491 sec
-DFD: train random forest in parallel : 346.831378 samples/sec; 0.288325 sec/tree, 5.766491 sec
-DFD: updateOutput : 831.105546 samples/sec; 0.038509 sec
-```
-
-## Patch 4 :
-
-This patch improves `nn.DFD` from
-
-```
-th -e "dt = require 'decisiontree'; dt.benchmark({'DFD'}, {nTree=500,maxLeafNodes=8,minLeafSize=1})"
-DFD: train random forest in parallel : 10.527251 samples/sec; 0.037997 sec/tree, 18.998313 sec
-DFD: updateOutput : 32.442950 samples/sec; 9.863472 sec
-```
-
-to
-
-```
-th -e "dt = require 'decisiontree'; dt.benchmark({'DFD'}, {nTree=500,maxLeafNodes=8,minLeafSize=1})"
-DFD: train random forest in parallel : 10.839547 samples/sec; 0.036902 sec/tree, 18.450956 sec
-DFD: updateOutput : 359.158353 samples/sec; 0.890975 sec
-Sparse2Dense: updateOutput : 15395.648952 samples/sec; 0.020791 sec
-```
-
-That is a 10x speedup for `nn.DFD`.
-
-The patch also adds a benchmark for `nn.Sparse2Dense`:
-
-```
-th -e "dt = require 'decisiontree'; dt.benchmark({'Sparse2Dense'}, {nTree=500,maxLeafNodes=8,minLeafSize=1})"
-Sparse2Dense: updateOutput : 17158.126406 samples/sec; 0.018653 sec
-```
-
-Indeed, `nn.Sparse2Dense` is not the bottleneck; `nn.DFD` is.
-
-## Patch 5 :
-
-This patch improves `nn.DFD` inference from
-
-```
-for i in `seq 3`; do th -e "dt = require 'decisiontree'; dt.benchmark({'DFD'}, {nTree=500,maxLeafNodes=8,minLeafSize=1,batchsize=16,nActive=1200,nFeature=1300,nloop=100})"; done
-DFD: train random forest in parallel : 8.452295 samples/sec; 0.047324 sec/tree, 23.662212 sec
-DFD: updateOutput : 176.617872 samples/sec; 9.059109 sec
-DFD: train random forest in parallel : 8.350019 samples/sec; 0.047904 sec/tree, 23.952042 sec
-DFD: updateOutput : 183.508204 samples/sec; 8.718962 sec
-DFD: train random forest in parallel : 8.525779 samples/sec; 0.046917 sec/tree, 23.458266 sec
-DFD: updateOutput : 178.877077 samples/sec; 8.944692 sec
-```
-
-to
-
-```
-for i in `seq 3`; do th -e "dt = require 'decisiontree'; dt.benchmark({'DFD'}, {nTree=500,maxLeafNodes=8,minLeafSize=1,batchsize=16,nActive=1200,nFeature=1300,nloop=100})"; done
-DFD: train random forest in parallel : 8.434502 samples/sec; 0.047424 sec/tree, 23.712129 sec
-DFD: updateOutput : 6479.597179 samples/sec; 0.246933 sec
-DFD: train random forest in parallel : 8.334543 samples/sec; 0.047993 sec/tree, 23.996518 sec
-DFD: updateOutput : 6663.641184 samples/sec; 0.240114 sec
-DFD: train random forest in parallel : 8.353265 samples/sec; 0.047885 sec/tree, 23.942735 sec
-DFD: updateOutput : 6882.607456 samples/sec; 0.232475 sec
-```
-
-That is a 37x speedup for `nn.DFD`.
-
-## Patch 6:
-
-This patch improves `nn.DFD` from the previous result to
-
-```
-for i in `seq 5`; do th -e "dt = require 'decisiontree'; dt.benchmark({'DFD'}, {nTree=500,maxLeafNodes=8,minLeafSize=1,batchsize=16,nActive=1200,nFeature=1300,nloop=10000})"; done
-DFD: train random forest in parallel : 8.353504 samples/sec; 0.047884 sec/tree, 23.942050 sec
-DFD: updateOutput : 91967.342339 samples/sec; 1.739753 sec
-DFD: train random forest in parallel : 8.528141 samples/sec; 0.046904 sec/tree, 23.451770 sec
-DFD: updateOutput : 91405.321702 samples/sec; 1.750451 sec
-DFD: train random forest in parallel : 8.184562 samples/sec; 0.048872 sec/tree, 24.436250 sec
-DFD: updateOutput : 91623.388867 samples/sec; 1.746284 sec
-DFD: train random forest in parallel : 8.779561 samples/sec; 0.045560 sec/tree, 22.780182 sec
-DFD: updateOutput : 93914.242852 samples/sec; 1.703686 sec
-DFD: train random forest in parallel : 8.636201 samples/sec; 0.046317 sec/tree, 23.158330 sec
-DFD: updateOutput : 94092.241963 samples/sec; 1.700465 sec
-```
-
-That is another 13.8x speedup.
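For reference, the throughput figures in these tables follow directly from `bm.DFD` in `benchmark.lua` above: each loop iteration forwards one batch, so samples/sec is `nloop * batchsize / elapsed`. Reproducing the first Patch 6 number:

```
-- each dfd:forward(input) processes one batch of `batchsize` samples
local nloop, batchsize = 10000, 16
local elapsed = 1.739753                  -- sec, from the first run above
print(nloop * batchsize / elapsed)        -- ~91967.34, matching the reported samples/sec
```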
-
-## Patch 7:
-
-This patch improves `nn.Sparse2Dense` computation from
-
-```
-for i in `seq 3`; do th -e "dt = require 'decisiontree'; torch.setdefaulttensortype('torch.FloatTensor'); dt.benchmark({'Sparse2Dense'}, {nTree=500,maxLeafNodes=8,minLeafSize=1,nFeature=1500,nActive=1300,nloop=1000})"; done
-Sparse2Dense: updateOutput : 1103.570777 samples/sec; 28.996786 sec
-Sparse2Dense: updateOutput : 1092.064331 samples/sec; 29.302309 sec
-Sparse2Dense: updateOutput : 1036.963572 samples/sec; 30.859334 sec
-```
-
-to
-
-```
-for i in `seq 3`; do th -e "dt = require 'decisiontree'; torch.setdefaulttensortype('torch.FloatTensor'); dt.benchmark({'Sparse2Dense'}, {nTree=500,maxLeafNodes=8,minLeafSize=1,nFeature=1500,nActive=1300,nloop=1000})"; done
-Sparse2Dense: updateOutput : 62995.834470 samples/sec; 0.507978 sec
-Sparse2Dense: updateOutput : 62471.568253 samples/sec; 0.512242 sec
-Sparse2Dense: updateOutput : 62965.099331 samples/sec; 0.508226 sec
-```
-
-This represents a speedup of about 57x.
-
-## Patch 8:
-
-This patch improves `nn.Sparse2Dense` from the previous result to
-
-```
-for i in `seq 3`; do th -e "dt = require 'decisiontree'; torch.setdefaulttensortype('torch.FloatTensor'); dt.benchmark({'Sparse2Dense'}, {nTree=500,maxLeafNodes=8,minLeafSize=1,nFeature=1500,nActive=1300,nloop=1000})"; done
-Sparse2Dense: updateOutput : 124268.079914 samples/sec; 0.257515 sec
-Sparse2Dense: updateOutput : 114750.039542 samples/sec; 0.278873 sec
-Sparse2Dense: updateOutput : 122863.314766 samples/sec; 0.260458 sec
-```
-
-which corresponds to another 1.95x speedup.
-
-## Patch 9:
-
-This patch moves the core of training GBDTs, which used to be a big bottleneck, to C. It also
-performs small optimizations across the board (faster scoring, faster branching, ...) that provide a
-little more performance.
-
-The original commit had this performance:
-
-```
-th -e "dt = require 'decisiontree'; torch.setdefaulttensortype('torch.FloatTensor'); dt.benchmark({'GradientBoostTrainer'}, {nExample=100000, sparse=false, nFeature=836, nTree=5, downsampleRatio=1, minLeafSize=1000, maxLeafNodes=8})"
-GradientBoostTrainer: train single-thread : 500.414666 samples/sec; 39.966854 sec/tree, 199.834271 sec
-GradientBoostTrainer: train feature-parallel : 1227.228044 samples/sec; 16.296890 sec/tree, 81.484448 sec (4 threads)
-GradientBoostTrainer: train feature-parallel : 1385.926280 samples/sec; 14.430782 sec/tree, 72.153910 sec (8 threads)
-```
-
-and the new version has
-
-```
-GradientBoostTrainer: train single-thread : 15285.644631 samples/sec; 1.308417 sec/tree, 6.542086 sec
-GradientBoostTrainer: train feature-parallel : 43170.435932 samples/sec; 0.463280 sec/tree, 2.316400 sec (4 threads)
-GradientBoostTrainer: train feature-parallel : 50062.681239 samples/sec; 0.399499 sec/tree, 1.997496 sec (8 threads)
-```
-
-That represents a speedup of about 30.5x over the baseline for 1 thread and 36.1x for 8 threads.
-Note that the performance doesn't increase much as we increase the number of threads since we use
-feature parallelism and the number of features evaluated is small (29 in this case) due to bagging.
-If we disable bagging, then we have the following result with 8 threads and the new code:
-
-```
-GradientBoostTrainer: train single-thread : 590.823965 samples/sec; 33.851030 sec/tree, 169.255152 sec
-GradientBoostTrainer: train feature-parallel : 3232.188576 samples/sec; 6.187758 sec/tree, 30.938789 sec
-```
-
-So processing 836 features is now much faster than processing 29 was before.
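The "29 features" figure is consistent with a square-root feature bag: the benchmark defaults leave `featureBaggingSize=-1`, which presumably falls back to a `sqrt(nFeature)` bag, a common default for tree ensembles. The exact rule lives in the trainer code, so treat this as an assumption:

```
-- assumed interpretation of featureBaggingSize = -1 (sqrt bagging)
local nFeature = 836
local bagSize = math.floor(math.sqrt(nFeature) + 0.5)
print(bagSize)  -- 29, matching the number quoted above
```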
diff --git a/contrib/lua-torch/decisiontree/error.h b/contrib/lua-torch/decisiontree/error.h
deleted file mode 100644
index 18df3c939b..0000000000
--- a/contrib/lua-torch/decisiontree/error.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _ERROR_H_
-#define _ERROR_H_
-
-#include "luaT.h"
-#include <string.h>
-
-static inline int _lua_error(lua_State *L, int ret, const char* file, int line) {
-  int pos_ret = ret >= 0 ? ret : -ret;
-  return luaL_error(L, "ERROR: (%s, %d): (%d, %s)\n", file, line, pos_ret, strerror(pos_ret));
-}
-
-static inline int _lua_error_str(lua_State *L, const char *str, const char* file, int line) {
-  return luaL_error(L, "ERROR: (%s, %d): (%s)\n", file, line, str);
-}
-
-static inline int _lua_error_str_str(lua_State *L, const char *str, const char* file, int line, const char *extra) {
-  return luaL_error(L, "ERROR: (%s, %d): (%s: %s)\n", file, line, str, extra);
-}
-
-#define LUA_HANDLE_ERROR(L, ret) _lua_error(L, ret, __FILE__, __LINE__)
-#define LUA_HANDLE_ERROR_STR(L, str) _lua_error_str(L, str, __FILE__, __LINE__)
-#define LUA_HANDLE_ERROR_STR_STR(L, str, extra) _lua_error_str_str(L, str, __FILE__, __LINE__, extra)
-
-#endif
diff --git a/contrib/lua-torch/decisiontree/generic/CartTree.c b/contrib/lua-torch/decisiontree/generic/CartTree.c
deleted file mode 100644
index eb29fcf025..0000000000
--- a/contrib/lua-torch/decisiontree/generic/CartTree.c
+++ /dev/null
@@ -1,88 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#define TH_GENERIC_FILE "generic/CartTree.c"
-#else
-
-static int nn_(tree_fast_score)(lua_State *L) {
-  THTensor *input = luaT_checkudata(L, 1, torch_Tensor);
-  THTensor *score = luaT_checkudata(L, 3, torch_Tensor);
-  long n_samples = THTensor_(size)(input, 0);
-  long n_features = THTensor_(size)(input, 1);
-  THTensor_(resize1d)(score, n_samples);
-  real *input_data = THTensor_(data)(input);
-  real *score_data = THTensor_(data)(score);
-
-  lua_pushstring(L, "leftChild");
-  const int left_child_string = 4;
-  lua_pushstring(L, "rightChild");
-  const int right_child_string = 5;
-  lua_pushstring(L, "score");
-  const int score_string = 6;
-  lua_pushstring(L, "splitFeatureId");
-  const int id_string = 7;
-  lua_pushstring(L, "splitFeatureValue");
-  const int value_string = 8;
-
-  const int original_top = lua_gettop(L);
-  for (long i = 0; i < n_samples; i++) {
-    int node = 2;
-    while (1) {
-      int current_top = lua_gettop(L);
-      lua_pushvalue(L, left_child_string);
-      lua_rawget(L, node);
-      lua_pushvalue(L, right_child_string);
-      lua_rawget(L, node);
-      if (lua_isnil(L, -2) && lua_isnil(L, -1)) {
-        lua_pushvalue(L, score_string);
-        lua_rawget(L, node);
-        score_data[i] = lua_tonumber(L, -1);
-        break;
-      }
-      if (lua_isnil(L, -2)) {
-        // go to right
-        node = current_top + 2;
-        continue;
-      }
-      if (lua_isnil(L, -1)) {
-        // go to left
-        node = current_top + 1;
-        continue;
-      }
-      lua_pushvalue(L, id_string);
-      lua_rawget(L, node);
-      lua_pushvalue(L, value_string);
-      lua_rawget(L, node);
-      long feature_id = lua_tointeger(L, -2);
-      real feature_value = lua_tonumber(L, -1);
-
-      real current_value = input_data[i * n_features + (feature_id-1)];
-      if (current_value < feature_value) {
-        // go to left
-        node = current_top + 1;
-      }
-      else {
-        // go to right
-        node = current_top + 2;
-      }
-    }
-    lua_pop(L, lua_gettop(L) - original_top);
-  }
-
-  lua_pop(L, 5);
-
-  lua_pushvalue(L, 3);
-  return 1;
-}
-
-static const struct luaL_Reg nn_(CT__) [] = {
-  {"CartTreeFastScore", nn_(tree_fast_score)},
-  {NULL, NULL}
-};
-
-static void nn_(CT_init)(lua_State *L)
-{
-  luaT_pushmetatable(L, torch_Tensor);
luaT_registeratname(L, nn_(CT__), "nn"); - lua_pop(L,1); -} - -#endif diff --git a/contrib/lua-torch/decisiontree/generic/DFD.c b/contrib/lua-torch/decisiontree/generic/DFD.c deleted file mode 100644 index 599c4d7947..0000000000 --- a/contrib/lua-torch/decisiontree/generic/DFD.c +++ /dev/null @@ -1,157 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/DFD.c" -#else - -static int nn_(DFD_computeOutput)(lua_State *L) { - THLongTensor *outputkeys = luaT_checkudata(L, 1, "torch.LongTensor"); - THTensor *outputvalues = luaT_checkudata(L, 2, torch_Tensor); - THLongTensor *root_ids = luaT_checkudata(L, 3, "torch.LongTensor"); - THLongTensor *left_child = luaT_checkudata(L, 4, "torch.LongTensor"); - THLongTensor *right_child = luaT_checkudata(L, 5, "torch.LongTensor"); - THLongTensor *split_feature_id = luaT_checkudata(L, 6, "torch.LongTensor"); - THTensor *split_feature_value = luaT_checkudata(L, 7, torch_Tensor); - THTensor *input = luaT_checkudata(L, 8, torch_Tensor); - char only_last_node = lua_toboolean(L, 9); - - // gets some important sizes from the input - long batch_size = THTensor_(size)(input, 0); - long input_size = THTensor_(size)(input, 1); - long roots_size = THLongTensor_size(root_ids, 0); - long depth = THLongTensor_size(outputkeys, 1); - - // keeps track of the number of nodes traversed in the trees by each sample. - // each traversed node maps to an output feature having a value of 1 - long outputsize[batch_size]; - for (long i = 0; i < batch_size; i++) - outputsize[i] = 0; - - // gets direct pointers to the memory of each tensor for efficiency - long *root_ids_data = THLongTensor_data(root_ids); - long *left_child_data = THLongTensor_data(left_child); - long *right_child_data = THLongTensor_data(right_child); - real *split_feature_value_data = THTensor_(data)(split_feature_value); - long *split_feature_id_data = THLongTensor_data(split_feature_id); - long *outputkeys_data = THLongTensor_data(outputkeys); - real *input_data = THTensor_(data)(input); - - // for each sample in the batch - for (long sample_index = 0; sample_index < batch_size; sample_index++) { - // gets pointers to the direct memory associated with each sample for efficiency - const long outputkeys_offset = sample_index * depth; - const long input_offset = sample_index * input_size; - long *local_outputkeys_data = &outputkeys_data[outputkeys_offset]; - real *local_input_data = &input_data[input_offset]; - - // for each tree in the forest - for (long i = 0; i < roots_size; i++) { - int root = 1; - long node_id = root_ids_data[i]; - - // traverses the whole tree keeping track of which nodes were seen - while (1) { - if (root) { - // root nodes aren't added to output because they are always traversed - root = 0; - } - else if (!only_last_node) { - // updates the outputsize for all samples traversing this node; and - // set the traversed node as a feature in output for exampleIds - long output_index = outputsize[sample_index]; - // updates the outputsize for all samples traversing this node - outputsize[sample_index]++; - // sets the traversed node as a feature in output for exampleIds - local_outputkeys_data[output_index] = node_id; - } - - // gets the left and right nodes. 
values of -1 represent a missing node
-      long left_id = left_child_data[node_id-1];
-      long right_id = right_child_data[node_id-1];
-
-      if (left_id <= 0 && right_id <= 0) {
-        if (only_last_node) {
-          long output_index = outputsize[sample_index];
-          outputsize[sample_index]++;
-          local_outputkeys_data[output_index] = node_id;
-        }
-        // if no children, stops
-        break;
-      }
-      else if (left_id <= 0) {
-        // if no left child, traverses right node
-        node_id = right_id;
-      }
-      else if (right_id <= 0) {
-        // if no right child, traverses left node
-        node_id = left_id;
-      }
-      else {
-        // if both left and right children, finds the direction for this sample
-        // first gets the reference from the node
-        real split_value = split_feature_value_data[node_id-1];
-        long split_id = split_feature_id_data[node_id-1]-1;
-
-        // then gets the value of the sample
-        real node_value = local_input_data[split_id];
-        // and branches
-        if (node_value < split_value)
-          node_id = left_id;
-        else
-          node_id = right_id;
-      }
-      }
-    }
-  }
-
-  // now that we know which nodes were traversed for each sample, we can create the sparse output
-  // with 1 entry pair for each sample
-  THTensor *input_feature = THTensor_(new)();
-  THLongTensor *indices = THLongTensor_new();
-
-  // pushes the return table with 2 children tables
-  lua_newtable(L);
-  lua_pushinteger(L, 1);
-  lua_newtable(L);
-  lua_pushinteger(L, 2);
-  lua_newtable(L);
-
-  // for each sample...
-  for (long i = 0; i < batch_size; i++) {
-    long j = outputsize[i];
-    // selects the tensor lines from the dense output
-    THLongTensor_select(indices, outputkeys, 0, i);
-    THTensor_(select)(input_feature, outputvalues, 0, i);
-
-    // narrows the keys to the actual number of nodes traversed and saves them to the output
-    lua_pushinteger(L, i+1);
-    luaT_pushudata(L, THLongTensor_newNarrow(indices, 0, 0, j), "torch.LongTensor");
-    lua_settable(L, -5);
-
-    // and narrows the values
-    lua_pushinteger(L, i+1);
-    luaT_pushudata(L, THTensor_(newNarrow)(input_feature, 0, 0, j), torch_Tensor);
-    lua_settable(L, -3);
-  }
-
-  // pushes the two parts of the output into the output table
-  lua_settable(L, -5);
-  lua_settable(L, -3);
-
-  THLongTensor_free(indices);
-  THTensor_(free)(input_feature);
-
-  return 1;
-}
-
-static const struct luaL_Reg nn_(DFD__) [] = {
-  {"DFD_computeOutput", nn_(DFD_computeOutput)},
-  {NULL, NULL}
-};
-
-static void nn_(DFD_init)(lua_State *L)
-{
-  luaT_pushmetatable(L, torch_Tensor);
-  luaT_registeratname(L, nn_(DFD__), "nn");
-  lua_pop(L,1);
-}
-
-#endif
diff --git a/contrib/lua-torch/decisiontree/generic/GBDT.c b/contrib/lua-torch/decisiontree/generic/GBDT.c
deleted file mode 100644
index 31f5b025d9..0000000000
--- a/contrib/lua-torch/decisiontree/generic/GBDT.c
+++ /dev/null
@@ -1,392 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#define TH_GENERIC_FILE "generic/GBDT.c"
-#else
-
-#include "GBDT_internal.h"
-#include "GBDT_internal.c"
-
-// note that each one of the functions to find the best split is a subset of the next.
-// first we have one that can only evaluate a single feature, using the logic in lua to control the
-// features
-// then we have one that can go over a shard of features, following the feature parallelism
-// introduced by the lua logic
-// and finally we have one that performs the feature parallelism itself in the special case of
-// dense tensors
-// these functions are provided for completeness and to test in case the logic is to be changed
-
-// finds the best split for a given node and feature
-static int nn_(gb_findBestFeatureSplit)(lua_State *L) {
-  THLongTensor *exampleIds = luaT_checkudata(L, 1, "torch.LongTensor");
-  const int dataset_index = 2;
-  if (!lua_isnumber(L, 3))
-    return LUA_HANDLE_ERROR_STR(L, "third argument should be an integer");
-  long feature_id = lua_tointeger(L, 3);
-  if (!lua_isnumber(L, 4))
-    return LUA_HANDLE_ERROR_STR(L, "fourth argument should be an integer");
-  long minLeafSize = lua_tointeger(L, 4);
-  // Since minLeafSize == 1 corresponds to each sample in its own leaf, any value below it doesn't
-  // make sense
-  if (minLeafSize < 1)
-    minLeafSize = 1;
-  THTensor *grad = luaT_checkudata(L, 5, torch_Tensor);
-  THTensor *hess = luaT_checkudata(L, 6, torch_Tensor);
-
-  if (!THLongTensor_isContiguous(exampleIds))
-    return LUA_HANDLE_ERROR_STR(L, "exampleIds has to be contiguous");
-  if (!THTensor_(isContiguous)(grad))
-    return LUA_HANDLE_ERROR_STR(L, "grad has to be contiguous");
-  if (!THTensor_(isContiguous)(hess))
-    return LUA_HANDLE_ERROR_STR(L, "hessian has to be contiguous");
-
-  // initializes the static data
-  nn_(GBInitialization) initialization_data;
-  nn_(gb_initialize)(L, &initialization_data, exampleIds, grad, hess, dataset_index);
-
-  // initializes the dynamic data
-  GBRunData run_data;
-  gb_create_run_data(&run_data, minLeafSize);
-
-  // finds the best state possible for the split
-  nn_(GBBestState) bs;
-  nn_(gb_find_best_feature_split)(L, &initialization_data, &bs, feature_id, &run_data);
-
-  lua_pop(L, lua_gettop(L) - initialization_data.splitInfo_index);
-
-  // fills the table with the best split found and the lua logic above will do everything else
-  // if no state was found, returns nil
-  if (bs.valid_state == 0) {
-    lua_pop(L, 1);
-    lua_pushnil(L);
-  }
-  else {
-    nn_(gb_internal_split_info)(L, &bs, initialization_data.splitInfo_index);
-  }
-
-  gb_destroy_run_data(&run_data);
-
-  return 1;
-}
-
-// finds the best split for a given node and shard of features
-// this is more efficient than calling the previous one multiple times
-static int nn_(gb_findBestSplit)(lua_State *L) {
-  THLongTensor *exampleIds = luaT_checkudata(L, 1, "torch.LongTensor");
-  const int dataset_index = 2;
-  THLongTensor *feature_ids = luaT_checkudata(L, 3, "torch.LongTensor");
-  if (!lua_isnumber(L, 4))
-    return LUA_HANDLE_ERROR_STR(L, "fourth argument should be an integer");
-  long minLeafSize = lua_tointeger(L, 4);
-  // Since minLeafSize == 1 corresponds to each sample in its own leaf, any value below it doesn't
-  // make sense
-  if (minLeafSize < 1)
-    minLeafSize = 1;
-  if (!lua_isnumber(L, 5))
-    return LUA_HANDLE_ERROR_STR(L, "fifth argument should be an integer");
-  long shardId = lua_tointeger(L, 5);
-  if (!lua_isnumber(L, 6))
-    return LUA_HANDLE_ERROR_STR(L, "sixth argument should be an integer");
-  long nShard = lua_tointeger(L, 6);
-  THTensor *grad = luaT_checkudata(L, 7, torch_Tensor);
-  THTensor *hess = luaT_checkudata(L, 8, torch_Tensor);
-
-  if (!THLongTensor_isContiguous(exampleIds))
-    return LUA_HANDLE_ERROR_STR(L, "exampleIds has to be contiguous");
-  if (!THTensor_(isContiguous)(grad))
-    return LUA_HANDLE_ERROR_STR(L, "grad has to be contiguous");
-  if (!THTensor_(isContiguous)(hess))
-    return LUA_HANDLE_ERROR_STR(L, "hessian has to be contiguous");
-
-  // initializes the static data
-  nn_(GBInitialization) initialization_data;
-  nn_(gb_initialize)(L, &initialization_data, exampleIds, grad, hess, dataset_index);
-
-  // initializes the dynamic data
-  GBRunData run_data;
-  gb_create_run_data(&run_data, minLeafSize);
-
-  // initializes to evaluate all the features in this shard
-  nn_(GBBestState) global_bs;
-  global_bs.valid_state = 0;
-  long n_features = THLongTensor_size(feature_ids, 0);
-  if (!THLongTensor_isContiguous(feature_ids))
-    return LUA_HANDLE_ERROR_STR(L, "feature_ids must be contiguous");
-  long *feature_ids_data = THLongTensor_data(feature_ids);
-
-  // for every feature
-  for (long i = 0; i < n_features; i++) {
-    long feature_id = feature_ids_data[i];
-    // if we are responsible for it
-    if (nShard <= 1 || (feature_id % nShard) + 1 == shardId) {
-      // finds the best state possible for the split
-      nn_(GBBestState) bs;
-      nn_(gb_find_best_feature_split)(L, &initialization_data, &bs, feature_id, &run_data);
-
-      // if it's valid and better than one we found before, saves it
-      if (bs.valid_state) {
-        if (global_bs.valid_state == 0 || bs.gain < global_bs.gain) {
-          global_bs = bs;
-        }
-      }
-    }
-  }
-
-  lua_pop(L, lua_gettop(L) - initialization_data.splitInfo_index);
-
-  // fills the table with the best split found and the lua logic above will do everything else
-  // if no state was found, returns nil
-  if (global_bs.valid_state == 0) {
-    lua_pop(L, 1);
-    lua_pushnil(L);
-  }
-  else {
-    nn_(gb_internal_split_info)(L, &global_bs, initialization_data.splitInfo_index);
-  }
-
-  gb_destroy_run_data(&run_data);
-
-  return 1;
-}
-
-// all the info we have to pass to the slave threads so that they can do their jobs
-// note that we do not pass the lua state since it isn't required.
we perform direct C parallelism -// instead of using lua's parallelism like with the previous version -typedef struct { - nn_(GBInitialization) *initialization_data; - GBRunData *run_data; - long *index; - nn_(GBBestState) *global_bs; - long n_features; - long *feature_ids_data; - pthread_mutex_t *mutex; - THLongTensor *exampleIds; - THTensor *input; - THLongTensor **sorted_ids_per_feature; -} nn_(ThreadInfo); - -// loops over all the features in parallel and finds the best global split -static void* nn_(thread_worker)(void *arg) { - nn_(ThreadInfo) *info = (nn_(ThreadInfo) *)arg; - - while (1) { - pthread_mutex_lock(info->mutex); - long index = (*info->index); - (*info->index)++; - pthread_mutex_unlock(info->mutex); - - if (index >= info->n_features) - break; - - // performs part of steps (1) and (2) of gb_find_best_feature_split without having to access the - // lua state using pre-loaded data - long feature_id = info->feature_ids_data[index]; - THLongTensor *exampleIdsWithFeature_ret = info->exampleIds; - THLongTensor *featureExampleIds = info->sorted_ids_per_feature[index]; - nn_(GBInitialization) *initialization_data = info->initialization_data; - GBRunData *run_data = info->run_data; - - // performs steps (3) and (4) of gb_find_best_feature_split since (1) and (2) were already - // performed before - nn_(GBBestState) bs; - nn_(gb_internal_create)(initialization_data->grad, initialization_data->hess, - exampleIdsWithFeature_ret, &bs.state); - nn_(gb_internal_get_best_split_special)(&bs, featureExampleIds, run_data->exampleMap, - info->input, run_data->minLeafSize, feature_id); - - // saves to the global state if it's better - if (bs.valid_state) { - pthread_mutex_lock(info->mutex); - if (info->global_bs->valid_state == 0 || bs.gain < info->global_bs->gain) { - (*info->global_bs) = bs; - } - pthread_mutex_unlock(info->mutex); - } - } - - return NULL; -} - -// finds the global best split by doing feature parallelism directly in C -static int nn_(gb_findBestSplitFP)(lua_State *L) { - THLongTensor *exampleIds = luaT_checkudata(L, 1, "torch.LongTensor"); - const int dataset_index = 2; - THLongTensor *feature_ids = luaT_checkudata(L, 3, "torch.LongTensor"); - if (!lua_isnumber(L, 4)) - return LUA_HANDLE_ERROR_STR(L, "fourth argument should be an integer"); - long minLeafSize = lua_tointeger(L, 4); - THTensor *grad = luaT_checkudata(L, 5, torch_Tensor); - THTensor *hess = luaT_checkudata(L, 6, torch_Tensor); - if (!lua_isnumber(L, 7)) - return LUA_HANDLE_ERROR_STR(L, "seventh argument should be an integer"); - long nThread = lua_tointeger(L, 7); - - if (!THLongTensor_isContiguous(exampleIds)) - return LUA_HANDLE_ERROR_STR(L, "exampleIds has to be contiguous"); - if (!THTensor_(isContiguous)(grad)) - return LUA_HANDLE_ERROR_STR(L, "grad has to be contiguous"); - if (!THTensor_(isContiguous)(hess)) - return LUA_HANDLE_ERROR_STR(L, "hessian has to be contiguous"); - - pthread_mutex_t mutex; - pthread_mutex_init(&mutex, NULL); - - // initializes the static data - nn_(GBInitialization) initialization_data; - nn_(gb_initialize)(L, &initialization_data, exampleIds, grad, hess, dataset_index); - - // initializes the dynamic data - GBRunData run_data; - gb_create_run_data(&run_data, minLeafSize); - - // initializes to evaluate all the features - nn_(GBBestState) global_bs; - global_bs.valid_state = 0; - long n_features = THLongTensor_size(feature_ids, 0); - if (!THLongTensor_isContiguous(feature_ids)) - return LUA_HANDLE_ERROR_STR(L, "feature_ids must be contiguous"); - long *feature_ids_data = 
THLongTensor_data(feature_ids);
-
-  THTensor *input = luaT_checkudata(L, initialization_data.input_index, torch_Tensor);
-
-  // performs step (1) of gb_find_best_feature_split so that we don't have to pass the lua state
-  THLongTensor *sorted_ids_per_feature[n_features];
-  for (long i = 0; i < n_features; i++) {
-    long feature_id = feature_ids_data[i];
-    lua_pushvalue(L, initialization_data.getSortedFeature_index);
-    lua_pushvalue(L, initialization_data.dataset_index);
-    lua_pushinteger(L, feature_id);
-    lua_call(L, 2, 1);
-
-    THLongTensor *featureExampleIds = luaT_checkudata(L, -1, "torch.LongTensor");
-    sorted_ids_per_feature[i] = featureExampleIds;
-  }
-
-  // performs step (2) of gb_find_best_feature_split since it's the same for all features when the
-  // data is dense
-  long exampleIds_size = THLongTensor_size(initialization_data.exampleIds, 0);
-  long *exampleIds_data = THLongTensor_data(initialization_data.exampleIds);
-
-  int ret;
-  kh_resize(long, run_data.exampleMap, exampleIds_size*8);
-  for (long i = 0; i < exampleIds_size; i++)
-    kh_put(long, run_data.exampleMap, exampleIds_data[i], &ret);
-
-  // saves the info for the threads
-  long index = 0;
-  nn_(ThreadInfo) info;
-  info.initialization_data = &initialization_data;
-  info.run_data = &run_data;
-  info.index = &index;
-  info.global_bs = &global_bs;
-  info.n_features = n_features;
-  info.feature_ids_data = feature_ids_data;
-  info.mutex = &mutex;
-  info.exampleIds = exampleIds;
-  info.input = input;
-  info.sorted_ids_per_feature = sorted_ids_per_feature;
-
-  pthread_t threads[nThread];
-
-  // let the threads run like crazy over the features to find the minimum
-  for (long i = 0; i < nThread; i++) {
-    int ret = pthread_create(&threads[i], NULL, nn_(thread_worker), &info);
-    if (ret)
-      return LUA_HANDLE_ERROR_STR(L, "failed to create thread");
-  }
-
-  for (long i = 0; i < nThread; i++) {
-    int ret = pthread_join(threads[i], NULL);
-    if (ret)
-      return LUA_HANDLE_ERROR_STR(L, "failed to join thread");
-  }
-
-  lua_pop(L, lua_gettop(L) - initialization_data.splitInfo_index);
-
-  // fills the table with the best split found and the lua logic above will do everything else
-  // if no state was found, returns nil
-  if (global_bs.valid_state == 0) {
-    lua_pop(L, 1);
-    lua_pushnil(L);
-  }
-  else {
-    nn_(gb_internal_split_info)(L, &global_bs, initialization_data.splitInfo_index);
-  }
-
-  gb_destroy_run_data(&run_data);
-  pthread_mutex_destroy(&mutex);
-
-  return 1;
-}
-
-// performs an efficient branching of the current examples based on the split info provided
-static int nn_(gb_branch)(lua_State *L) {
-  if (!lua_istable(L, 1))
-    return LUA_HANDLE_ERROR_STR(L, "first argument must be a table");
-  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
-  THLongTensor *exampleIds = luaT_checkudata(L, 3, "torch.LongTensor");
-
-  // gets direct access to the dataset
-  long n_exampleIds = THLongTensor_size(exampleIds, 0);
-  long *exampleIds_data = THLongTensor_data(exampleIds);
-  long n_features = THTensor_(size)(input, 1);
-  real *input_data = THTensor_(data)(input);
-
-  // creates the tensors to be returned
-  luaT_pushudata(L, THLongTensor_new(), "torch.LongTensor");
-  luaT_pushudata(L, THLongTensor_new(), "torch.LongTensor");
-  THLongTensor *leftExampleIds = luaT_checkudata(L, 4, "torch.LongTensor");
-  THLongTensor *rightExampleIds = luaT_checkudata(L, 5, "torch.LongTensor");
-  THLongTensor_resize1d(leftExampleIds, n_exampleIds);
-
-  // gets direct access to the examples
-  THLongTensor *splitExampleIds = leftExampleIds;
-  long
*splitExampleIds_data = THLongTensor_data(splitExampleIds); - - // gets the split info - lua_pushstring(L, "splitId"); - lua_rawget(L, 1); - const long splitId = lua_tointeger(L, -1); - lua_pushstring(L, "splitValue"); - lua_rawget(L, 1); - const real splitValue = lua_tonumber(L, -1); - lua_pop(L, 2); - - long leftIdx = 0, rightIdx = 0; - - // goes over all the samples dividing them into the two sides - for (long i = 0; i < n_exampleIds; i++) { - long exampleId = exampleIds_data[i]; - real val = input_data[(exampleId-1) * n_features + (splitId - 1)]; - if (val <= splitValue) { - leftIdx++; - splitExampleIds_data[leftIdx-1] = exampleId; - } - else { - rightIdx++; - splitExampleIds_data[n_exampleIds - rightIdx + 1 - 1] = exampleId; - } - } - - // once done, the resulting tensors are just splits of the sample base. this is more efficient - // than having 2 tensors since we didn't know where the split would happen (how much to each - // side), but we knew that the sum would be constant - THLongTensor_narrow(rightExampleIds, splitExampleIds, 0, n_exampleIds-rightIdx+1-1, rightIdx); - THLongTensor_narrow(leftExampleIds, splitExampleIds, 0, 0, leftIdx); - return 2; -} - -static const struct luaL_Reg nn_(GBDT__) [] = { - {"GBDT_findBestFeatureSplit", nn_(gb_findBestFeatureSplit)}, - {"GBDT_findBestSplit", nn_(gb_findBestSplit)}, - {"GBDT_findBestSplitFP", nn_(gb_findBestSplitFP)}, - {"GBDT_branch", nn_(gb_branch)}, - {NULL, NULL} -}; - -static void nn_(GBDT_init)(lua_State *L) -{ - luaT_pushmetatable(L, torch_Tensor); - luaT_registeratname(L, nn_(GBDT__), "nn"); - lua_pop(L,1); -} - -#endif diff --git a/contrib/lua-torch/decisiontree/generic/GBDT_internal.c b/contrib/lua-torch/decisiontree/generic/GBDT_internal.c deleted file mode 100644 index 739aabf258..0000000000 --- a/contrib/lua-torch/decisiontree/generic/GBDT_internal.c +++ /dev/null @@ -1,312 +0,0 @@ -// initializes the optimization structure based on the arguments provided, either filling directly -// or making calls to lua to load some kind of data -static void nn_(gb_initialize)(lua_State *L, nn_(GBInitialization) *initialization_data, - THLongTensor *exampleIds, THTensor *grad, THTensor *hess, int dataset_index) { - initialization_data->dataset_index = dataset_index; - initialization_data->exampleIds = exampleIds; - initialization_data->grad = grad; - initialization_data->hess = hess; - - lua_newtable(L); - initialization_data->splitInfo_index = lua_gettop(L); - - lua_pushstring(L, "input"); - lua_gettable(L, dataset_index); - initialization_data->input_index = lua_gettop(L); - - lua_pushstring(L, "getSortedFeature"); - lua_gettable(L, dataset_index); - initialization_data->getSortedFeature_index = lua_gettop(L); -} - -// initializes a state that will be passed to the optimizer -static void nn_(gb_internal_create)(THTensor *grad, THTensor *hessian, - THLongTensor *exampleIds, nn_(GBState)* s) { - long *exampleIds_data = THLongTensor_data(exampleIds); - long n_examples = THLongTensor_size(exampleIds, 0); - accreal leftGradientSum = 0; - accreal leftHessianSum = 0; - - real *grad_data = THTensor_(data)(grad); - real *hessian_data = THTensor_(data)(hessian); - - // only sums the relevant gradients and hessians - for (long i = 0; i < n_examples; i++) { - long exampleId = exampleIds_data[i]-1; - leftGradientSum += grad_data[exampleId]; - leftHessianSum += hessian_data[exampleId]; - } - - // we move data from the left branch to the right branch - s->rightGradientSum = 0; - s->rightHessianSum = 1; - s->nExampleInRightBranch = 0; - 
s->leftGradientSum = leftGradientSum; - s->leftHessianSum = leftHessianSum + 1; - s->nExampleInLeftBranch = n_examples; - - // stores the loss in parent for efficiency - real lossInParent = computeGradientBoostLoss(s->leftGradientSum + s->rightGradientSum, - s->leftHessianSum + s->rightHessianSum); - s->lossInParent = lossInParent; - - // caches the direct pointers to the data for efficiency - s->grad_data = grad_data; - s->hessian_data = hessian_data; -} - -// computes the gain obtained by performing the split -static real nn_(computeSplitGain)(nn_(GBState) *s) { - real lossInLeftBranch = computeGradientBoostLoss(s->leftGradientSum, s->leftHessianSum); - real lossInRightBranch = computeGradientBoostLoss(s->rightGradientSum, s->rightHessianSum); - return lossInLeftBranch + lossInRightBranch - s->lossInParent; -} - -// uses the state information to build the table required by the lua library about the best split -static void nn_(gb_internal_split_info)(lua_State *L, nn_(GBBestState) *bs, int res) { - long feature_id = bs->feature_id; - real feature_value = bs->feature_value; - real gain = bs->gain; - nn_(GBState) *s = &bs->state; - lua_pushstring(L, "splitGain"); - lua_pushnumber(L, gain); - lua_rawset(L, res); - lua_pushstring(L, "splitId"); - lua_pushinteger(L, feature_id); - lua_rawset(L, res); - lua_pushstring(L, "splitValue"); - lua_pushnumber(L, feature_value); - lua_rawset(L, res); - lua_pushstring(L, "leftChildSize"); - lua_pushinteger(L, s->nExampleInLeftBranch); - lua_rawset(L, res); - lua_pushstring(L, "rightChildSize"); - lua_pushinteger(L, s->nExampleInRightBranch); - lua_rawset(L, res); - lua_pushstring(L, "leftGradient"); - lua_pushnumber(L, s->leftGradientSum); - lua_rawset(L, res); - lua_pushstring(L, "rightGradient"); - lua_pushnumber(L, s->rightGradientSum); - lua_rawset(L, res); - lua_pushstring(L, "leftHessian"); - lua_pushnumber(L, s->leftHessianSum); - lua_rawset(L, res); - lua_pushstring(L, "rightHessian"); - lua_pushnumber(L, s->rightHessianSum); - lua_rawset(L, res); -} - -// core of the computation, where we loop over all the relevant samples looking for the best split -// we can find -static void nn_(gb_internal_get_best_split)(lua_State *L, nn_(GBBestState) *bs, - THLongTensor *featureExampleIds, khash_t(long)* exampleMap, int input_table_index, - long minLeafSize, long feature_id) { - nn_(GBState) current_state; - nn_(GBState) best_state; - current_state = bs->state; - - real best_gain = INFINITY; - real best_value = 0; - - // if the data is dense, pre-loads direct access to it - THTensor *input = NULL; - real *input_data = NULL; - long n_features = 0; - if (lua_istable(L, input_table_index)) { - } - else { - input = luaT_checkudata(L, input_table_index, torch_Tensor); - input_data = THTensor_(data)(input); - n_features = THTensor_(size)(input, 1); - } - - long stride = featureExampleIds->stride[0]; - long *featureExampleIds_data = THLongTensor_data(featureExampleIds); - - khiter_t k; - - real previousSplitValue = 0; - // for each example with the given feature and from large to small value... 
- for (long i = THLongTensor_size(featureExampleIds, 0)-1; i >= 0; i--) { - long exampleId = featureExampleIds_data[i * stride]; - - // checks if the sample is in the list of ones that have to be evaluated by this node - k = kh_get(long, exampleMap, exampleId); - if (k != kh_end(exampleMap)) { - long exampleIdx = exampleId; - - // gets the split value, depending on whether the input is sparse or dense - real splitValue; - if (input_data) { - splitValue = input_data[(exampleId-1) * n_features + feature_id-1]; - } - else { - lua_pushinteger(L, exampleId); - lua_gettable(L, input_table_index); - lua_pushinteger(L, feature_id); - lua_gettable(L, -2); - splitValue = lua_tonumber(L, -1); - lua_pop(L, 2); - } - - // performs one update of the state, moving a sample from the left branch to the right - real gradient = current_state.grad_data[exampleIdx-1]; - real hessian = current_state.hessian_data[exampleIdx-1]; - current_state.leftGradientSum -= gradient; - current_state.rightGradientSum += gradient; - current_state.leftHessianSum -= hessian; - current_state.rightHessianSum += hessian; - current_state.nExampleInLeftBranch--; - current_state.nExampleInRightBranch++; - - // since we remove from the left, once this becomes true, it stays true forever - // hence we stop the loop - if (current_state.nExampleInLeftBranch < minLeafSize) - break; - - if (current_state.nExampleInRightBranch >= minLeafSize) { - // if the values are equal between the steps, it doesn't make sense to evaluate the score - // since we won't be able to separate the two - if (previousSplitValue != splitValue) { - // computes the gain **without including the parent** since it doesn't change as we move - // examples between branches - real lossInLeftBranch = computeGradientBoostLoss(current_state.leftGradientSum, current_state.leftHessianSum); - real lossInRightBranch = computeGradientBoostLoss(current_state.rightGradientSum, current_state.rightHessianSum); - real current_gain = lossInLeftBranch + lossInRightBranch; - if (current_gain < best_gain) { - best_gain = current_gain; - best_value = splitValue; - best_state = current_state; - } - } - } - previousSplitValue = splitValue; - } - } - - // if there is a valid gain, then marks the state as valid and fills the meta-info - if (!isfinite(best_gain)) { - bs->valid_state = 0; - } - else { - bs->valid_state = 1; - bs->state = best_state; - bs->feature_id = feature_id; - bs->gain = nn_(computeSplitGain)(&bs->state); - bs->feature_value = best_value; - } -} - -// exactly like the previous version, but direct access to the data for efficiency. 
it also doesn't -// rely on the lua state in the particular case of dense data, so we can evaluate this without using -// the lua state -static void nn_(gb_internal_get_best_split_special)(nn_(GBBestState) *bs, - THLongTensor *featureExampleIds, khash_t(long)* exampleMap, THTensor *input, long minLeafSize, - long feature_id) { - nn_(GBState) current_state; - nn_(GBState) best_state; - current_state = bs->state; - - real best_gain = INFINITY; - real best_value = 0; - - real *input_data = NULL; - long n_features = 0; - input_data = THTensor_(data)(input); - n_features = THTensor_(size)(input, 1); - - long stride = featureExampleIds->stride[0]; - long *featureExampleIds_data = THLongTensor_data(featureExampleIds); - - khiter_t k; - - real previousSplitValue = 0; - for (long i = THLongTensor_size(featureExampleIds, 0)-1; i >= 0; i--) { - long exampleId = featureExampleIds_data[i * stride]; - - k = kh_get(long, exampleMap, exampleId); - if (k != kh_end(exampleMap)) { - long exampleIdx = exampleId; - - // THIS is the main part that changes. seems crazy to have a special case just for this, but - // since there are a **lot** of samples to be evaluated, the "if" in the previous case can - // become expensive - real splitValue; - splitValue = input_data[(exampleId-1) * n_features + feature_id-1]; - - real gradient = current_state.grad_data[exampleIdx-1]; - real hessian = current_state.hessian_data[exampleIdx-1]; - current_state.leftGradientSum -= gradient; - current_state.rightGradientSum += gradient; - current_state.leftHessianSum -= hessian; - current_state.rightHessianSum += hessian; - current_state.nExampleInLeftBranch--; - current_state.nExampleInRightBranch++; - - // since we remove from the left, once this becomes true, it stays true forever - // hence we stop the loop - if (current_state.nExampleInLeftBranch < minLeafSize) - break; - - // This will always fail in the first pass since minLeafSize >= 1 and nExampleInRightBranch - // starts at 0 - if (current_state.nExampleInRightBranch >= minLeafSize) { - if (previousSplitValue != splitValue) { - real lossInLeftBranch = computeGradientBoostLoss(current_state.leftGradientSum, current_state.leftHessianSum); - real lossInRightBranch = computeGradientBoostLoss(current_state.rightGradientSum, current_state.rightHessianSum); - real current_gain = lossInLeftBranch + lossInRightBranch; - if (current_gain < best_gain) { - best_gain = current_gain; - best_value = splitValue; - best_state = current_state; - } - } - } - previousSplitValue = splitValue; - } - } - - if (!isfinite(best_gain)) { - bs->valid_state = 0; - } - else { - bs->valid_state = 1; - bs->state = best_state; - bs->feature_id = feature_id; - bs->gain = nn_(computeSplitGain)(&bs->state); - bs->feature_value = best_value; - } -} - -// core of the computation to find the split for a given feature and is divided in 4 steps -static void nn_(gb_find_best_feature_split)(lua_State *L, - nn_(GBInitialization) *initialization_data, nn_(GBBestState) *bs, long feature_id, - GBRunData *run_data) { - - // 1) loads the examples in the dataset ordered by their feature value - lua_pushvalue(L, initialization_data->getSortedFeature_index); - lua_pushvalue(L, initialization_data->dataset_index); - lua_pushinteger(L, feature_id); - lua_call(L, 2, 1); - - THLongTensor *featureExampleIds = luaT_checkudata(L, -1, "torch.LongTensor"); - - // 2) processes the data to find the intersection between the examples in the dataset and the - // examples the current node has to evaluate - THLongTensor 
*exampleIdsWithFeature_ret = gb_internal_prepare(L, initialization_data->exampleIds, - run_data->exampleIdsWithFeature_cache, initialization_data->input_index, feature_id, - run_data->exampleMap); - if (!exampleIdsWithFeature_ret) { - bs->valid_state = 0; - return; - } - - // 3) creates a new state to be used by the optimizer - nn_(gb_internal_create)(initialization_data->grad, initialization_data->hess, - exampleIdsWithFeature_ret, &bs->state); - - // 4) optimize away! - nn_(gb_internal_get_best_split)(L, bs, featureExampleIds, run_data->exampleMap, - initialization_data->input_index, run_data->minLeafSize, feature_id); -} diff --git a/contrib/lua-torch/decisiontree/generic/GBDT_internal.h b/contrib/lua-torch/decisiontree/generic/GBDT_internal.h deleted file mode 100644 index 7119365cfe..0000000000 --- a/contrib/lua-torch/decisiontree/generic/GBDT_internal.h +++ /dev/null @@ -1,34 +0,0 @@ -// representation of a state used while searching for the best split -typedef struct { - real leftGradientSum, rightGradientSum; - real leftHessianSum, rightHessianSum; - real lossInParent; - long nExampleInLeftBranch, nExampleInRightBranch; - real *grad_data, *hessian_data; -} nn_(GBState); - -// representation for the best state found for a given feature -typedef struct { - nn_(GBState) state; - real gain; - long feature_id; - real feature_value; - int valid_state; -} nn_(GBBestState); - -// full data that must be initialized before calling the optimizer -typedef struct { - // *_index represent positions on the lua stack - int dataset_index; - int splitInfo_index; - int input_index; - // position of the dataset's function to return the samples ordered for a given feature - int getSortedFeature_index; - - // samples that this node has to evaluate - THLongTensor *exampleIds; - - // cached gradient and hessian for all data - THTensor *grad; - THTensor *hess; -} nn_(GBInitialization); diff --git a/contrib/lua-torch/decisiontree/generic/LogitBoostCriterion.c b/contrib/lua-torch/decisiontree/generic/LogitBoostCriterion.c deleted file mode 100644 index f2ea1ef091..0000000000 --- a/contrib/lua-torch/decisiontree/generic/LogitBoostCriterion.c +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/LogitBoostCriterion.c" -#else - -#define EPS 1e-12 - -static int nn_(LogitBoostCriterion_updateOutput)(lua_State *L) -{ - THTensor *input = luaT_checkudata(L, 1, torch_Tensor); - THTensor *target = luaT_checkudata(L, 2, torch_Tensor); - THTensor *output = luaT_checkudata(L, 3, torch_Tensor); - int sizeAverage = lua_toboolean(L, 4); - - if (THTensor_(nElement)(input) != THTensor_(nElement)(target)) { - luaL_error(L, "inconsistent input and target size"); - } - THTensor_(resize1d)(output, 1); - - real sum = 0; - - TH_TENSOR_APPLY2(real, input, real, target, - real x = *input_data; - real y = *target_data; - // math.log(1 + math.exp(target[i] <= 0 and input[i] or -input[i])) - sum += log(1 + exp(y <= 0 ? 
x : -x)); - ); - - if (sizeAverage) - sum /= THTensor_(nElement)(input); - - THTensor_(set1d)(output, 0, sum); - return 0; -} - -static int nn_(LogitBoostCriterion_updateGradInput)(lua_State *L) -{ - THTensor *input = luaT_checkudata(L, 1, torch_Tensor); - THTensor *target = luaT_checkudata(L, 2, torch_Tensor); - THTensor *gradInput = luaT_checkudata(L, 3, torch_Tensor); - - if (THTensor_(nElement)(input) != THTensor_(nElement)(target)) { - luaL_error(L, "inconsistent input and target size"); - } - THTensor_(resizeAs)(gradInput, input); - - TH_TENSOR_APPLY3(real, gradInput, real, input, real, target, - real x = *input_data; - real y = *target_data; - real p = (x >= 0) ? (1 / (1 + exp(-x))) : (1 - 1 / (1 + exp(x))); - *gradInput_data = (y <= 0) ? p : (p - 1); - ); - - return 0; -} - -static int nn_(LogitBoostCriterion_updateHessInput)(lua_State *L) -{ - THTensor *input = luaT_checkudata(L, 1, torch_Tensor); - THTensor *target = luaT_checkudata(L, 2, torch_Tensor); - THTensor *hessInput = luaT_checkudata(L, 3, torch_Tensor); - - if (THTensor_(nElement)(input) != THTensor_(nElement)(target)) { - luaL_error(L, "inconsistent input and target size"); - } - THTensor_(resizeAs)(hessInput, input); - - TH_TENSOR_APPLY3(real, hessInput, real, input, real, target, - real x = *input_data; - real p = (x >= 0) ? (1 / (1 + exp(-x))) : (1 - 1 / (1 + exp(x))); - *hessInput_data = p * (1.0 - p); - ); - - return 0; -} - -static const struct luaL_Reg nn_(LogitBoostCriterion__) [] = { - {"LogitBoostCriterion_updateOutput", nn_(LogitBoostCriterion_updateOutput)}, - {"LogitBoostCriterion_updateGradInput", nn_(LogitBoostCriterion_updateGradInput)}, - {"LogitBoostCriterion_updateHessInput", nn_(LogitBoostCriterion_updateHessInput)}, - {NULL, NULL} -}; - -static void nn_(LogitBoostCriterion_init)(lua_State *L) -{ - luaT_pushmetatable(L, torch_Tensor); - luaT_registeratname(L, nn_(LogitBoostCriterion__), "nn"); - lua_pop(L,1); -} - -#endif diff --git a/contrib/lua-torch/decisiontree/generic/S2D.c b/contrib/lua-torch/decisiontree/generic/S2D.c deleted file mode 100644 index 2392ee7c8f..0000000000 --- a/contrib/lua-torch/decisiontree/generic/S2D.c +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/S2D.c" -#else - -static int nn_(S2D_computeOutput)(lua_State *L) { - THTensor *output = luaT_checkudata(L, 1, torch_Tensor); - const int keys_index = 2; - const int values_index = 3; - const int masks_index = 4; - - if (!lua_istable(L, keys_index)) - return LUA_HANDLE_ERROR_STR(L, "expeced position 2 to be a table"); - if (!lua_istable(L, values_index)) - return LUA_HANDLE_ERROR_STR(L, "expeced position 3 to be a table"); - if (!lua_istable(L, masks_index)) - return LUA_HANDLE_ERROR_STR(L, "expeced position 4 to be a table"); - - - THLongTensor *features = luaT_checkudata(L, 5, "torch.LongTensor"); - - const int original_top = lua_gettop(L); - - long outputsize = THLongTensor_size(features, 0); - long batch_size = lua_objlen(L, keys_index); - - // initializes output - THTensor_(resize2d)(output, batch_size, outputsize); - THTensor_(zero)(output); - real *output_data = THTensor_(data)(output); - - // iterates over samples - lua_pushnil(L); - const int local_top = lua_gettop(L); - while (lua_next(L, keys_index) != 0) { - // gets data corresponding to the current sample - long i = lua_tointeger(L, -2)-1; - real *current_output_data = &output_data[i * outputsize]; - THLongTensor *keys = luaT_checkudata(L, -1, "torch.LongTensor"); - lua_rawgeti(L, values_index, i+1); - THTensor *values = 
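
updateGradInput and updateHessInput above evaluate p = sigmoid(x) with a sign branch that keeps the exp() argument non-positive, then use grad = p - y (targets in {0, 1}) and hess = p*(1 - p). The same arithmetic in isolation (a sketch, not the exported API):

#include <math.h>

/* Overflow-safe sigmoid, matching the branch in the criterion above. */
static double stable_sigmoid(double x) {
  return (x >= 0.0) ? 1.0 / (1.0 + exp(-x))
                    : 1.0 - 1.0 / (1.0 + exp(x));
}

static void logit_derivatives(double x, double y,
                              double *grad, double *hess) {
  double p = stable_sigmoid(x);
  *grad = (y <= 0.0) ? p : (p - 1.0);  /* first derivative of the loss */
  *hess = p * (1.0 - p);               /* second derivative */
}
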
luaT_checkudata(L, -1, torch_Tensor); - lua_rawgeti(L, masks_index, i+1); - THByteTensor *mask = luaT_checkudata(L, -1, "torch.ByteTensor"); - - long n_keys = THLongTensor_size(keys, 0); - long n_values = THTensor_(size)(values, 0); - - // quick safety check - if (n_keys != n_values) - return LUA_HANDLE_ERROR_STR(L, "keys and values have to have the same size"); - - // gets the direct memory pointers - long *keys_data = THLongTensor_data(keys); - real *values_data = THTensor_(data)(values); - unsigned char *mask_data = THByteTensor_data(mask); - - // for each value in the sparse input... - for (long j = 0; j < n_keys; j++) { - // loads the value and key - real current_value = values_data[j]; - long current_key = keys_data[j]; - unsigned char current_mask = mask_data[j]; - - // if the feature is present in the map - if (current_mask) - // saves in the given position - current_output_data[current_key-1] = current_value; - } - // cleans up the trash we create by iterating over keys to avoid it from overflowing - lua_pop(L, lua_gettop(L) - local_top); - } - - // cleans up the trash we added to the stack - lua_pop(L, lua_gettop(L) - original_top); - - return 0; -} - -static const struct luaL_Reg nn_(S2D__) [] = { - {"S2D_computeOutput", nn_(S2D_computeOutput)}, - {NULL, NULL} -}; - -static void nn_(S2D_init)(lua_State *L) -{ - luaT_pushmetatable(L, torch_Tensor); - luaT_registeratname(L, nn_(S2D__), "nn"); - lua_pop(L,1); -} - -#endif diff --git a/contrib/lua-torch/decisiontree/hash_map.c b/contrib/lua-torch/decisiontree/hash_map.c deleted file mode 100644 index 2c679e2079..0000000000 --- a/contrib/lua-torch/decisiontree/hash_map.c +++ /dev/null @@ -1,445 +0,0 @@ -#include "utils.h" -#include "hash_map.h" -#include "internal_hash_map.h" -#include - -hash_map_t hash_map_init(void) { - return kh_init(long); -} - -void hash_map_destroy(hash_map_t h_) { - internal_hash_map_t h = (internal_hash_map_t) h_; - kh_destroy(long, h); -} - -void hash_map_clear(hash_map_t h_) { - internal_hash_map_t h = (internal_hash_map_t) h_; - kh_clear(long, h); -} - -int hash_map_put(hash_map_t h_, long key, long val) { - internal_hash_map_t h = (internal_hash_map_t) h_; - int ret; - khiter_t k = kh_put(long, h, key, &ret); - ret = (ret >= 0); - if (ret) - kh_value(h, k) = val; - return ret; -} - -int hash_map_put_tensor(hash_map_t h_, THLongTensor *keys_, THLongTensor *vals_) { - long *keys = THLongTensor_data(keys_); - long *vals = THLongTensor_data(vals_); - long size = get_tensor_size(keys_, Long); - for (long i = 0; i < size; i++) - if (!hash_map_put(h_, keys[i], vals[i])) - return 0; - return 1; -} - -int hash_map_fill(hash_map_t h_, long key, long *counter) { - internal_hash_map_t h = (internal_hash_map_t) h_; - khiter_t k = kh_get(long, h, key); - if (k == kh_end(h)) - return hash_map_put(h_, key, ++(*counter)); - return 1; -} - -int hash_map_fill_tensor(hash_map_t h_, THLongTensor *keys_, long *counter) { - long *keys = THLongTensor_data(keys_); - long size = get_tensor_size(keys_, Long); - for (long i = 0; i < size; i++) - if (!hash_map_fill(h_, keys[i], counter)) - return 0; - return 1; -} - -int hash_map_get(hash_map_t h_, long key, long* val) { - internal_hash_map_t h = (internal_hash_map_t) h_; - khiter_t k = kh_get(long, h, key); - if (k == kh_end(h)) - return 0; - *val = kh_value(h, k); - return 1; -} - -void hash_map_get_tensor(hash_map_t h_, THLongTensor *keys_, THLongTensor *vals_, THByteTensor *mask_) { - long *keys = THLongTensor_data(keys_); - long *vals = THLongTensor_data(vals_);; - unsigned 
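
Stripped of the Lua stack handling, the per-sample scatter S2D_computeOutput performs reduces to writing each masked (key, value) pair into a dense row at key - 1. A distilled sketch (the bounds check is an addition for safety; the original trusts its keys):

/* Hypothetical core of the sparse-to-dense scatter above. */
static void sparse_to_dense_row(double *row, long row_len,
                                const long *keys, const double *values,
                                const unsigned char *mask, long n) {
  for (long j = 0; j < n; j++) {
    if (mask[j] && keys[j] >= 1 && keys[j] <= row_len)
      row[keys[j] - 1] = values[j];   /* keys are 1-based feature ids */
  }
}
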
char *mask = THByteTensor_data(mask_); - long size = get_tensor_size(keys_, Long); - for (long i = 0; i < size; i++) - mask[i] = hash_map_get(h_, keys[i], &vals[i]); -} - -void hash_map_del(hash_map_t h_, long key) { - internal_hash_map_t h = (internal_hash_map_t) h_; - khiter_t k = kh_get(long, h, key); - if (k != kh_end(h)) - kh_del(long, h, k); -} - -void hash_map_del_tensor(hash_map_t h_, THLongTensor *keys_) { - long *keys = THLongTensor_data(keys_); - long size = get_tensor_size(keys_, Long); - for (long i = 0; i < size; i++) - hash_map_del(h_, keys[i]); -} - -size_t hash_map_size(hash_map_t h_) { - internal_hash_map_t h = (internal_hash_map_t) h_; - return kh_size(h); -} - -void hash_map_to_tensor(hash_map_t h_, THLongTensor *keys_, THLongTensor *vals_) { - internal_hash_map_t h = (internal_hash_map_t) h_; - long *keys = THLongTensor_data(keys_); - long *vals = THLongTensor_data(vals_); - long key, val, i = 0; - kh_foreach(h, key, val, { - keys[i] = key; - vals[i] = val; - i++; - }); -} - -static void autolock(hash_map_lua_t *h) { - if (h->autolock) { - pthread_mutex_lock(&h->mutex); - } -} - -static void autounlock(hash_map_lua_t *h) { - if (h->autolock) { - pthread_mutex_unlock(&h->mutex); - } -} - -int hash_map_autolock_on_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - h->autolock = 1; - return 0; -} - -int hash_map_autolock_off_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - h->autolock = 0; - return 0; -} - -int hash_map_init_lua(lua_State *L) { - hash_map_lua_t **hp = (hash_map_lua_t**)lua_newuserdata(L, sizeof(hash_map_lua_t*)); - *hp = (hash_map_lua_t*)malloc(sizeof(hash_map_lua_t)); - hash_map_lua_t *h = *hp; - h->refcount = 1; - h->counter = 0; - h->autolock = 0; - h->h = hash_map_init(); - - pthread_mutexattr_t mutex_attr; - pthread_mutexattr_init(&mutex_attr); - pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE); - pthread_mutex_init(&h->mutex, &mutex_attr); - - luaL_getmetatable(L, "dt.HashMap"); - lua_setmetatable(L, -2); - return 1; -} - -int hash_map_gc_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - if (THAtomicDecrementRef(&h->refcount)) { - pthread_mutex_destroy(&h->mutex); - hash_map_destroy(h->h); - free(h); - } - return 0; -} - -int hash_map_retain_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - THAtomicIncrementRef(&h->refcount); - return 0; -} - -int hash_map_metatablename_lua(lua_State *L) { - lua_pushstring(L, "dt.HashMap"); - return 1; -} - -int hash_map_clear_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - autolock(h); - hash_map_clear(h->h); - autounlock(h); - return 0; -} - -int hash_map_put_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - int ret; -#if LUA_VERSION_NUM <= 501 -#define lua_isinteger lua_isnumber -#endif - if (lua_isinteger(L, 2)) { - if (!lua_isinteger(L, 3)) - return LUA_HANDLE_ERROR_STR(L, "second parameter is not a number"); - long key = lua_tointeger(L, 2); - long val = lua_tointeger(L, 3); - autolock(h); - ret = hash_map_put(h->h, key, val); - autounlock(h); - } - else { - THLongTensor *keys = (THLongTensor *)luaT_checkudata(L, 2, "torch.LongTensor"); - THLongTensor *vals = (THLongTensor *)luaT_checkudata(L, 3, "torch.LongTensor"); - check_tensor(L, keys, THLongTensor); - check_tensor(L, vals, THLongTensor); - check_tensors(L, keys, vals); - autolock(h); - ret = hash_map_put_tensor(h->h, keys, vals); - 
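
Everything above hides khash behind the opaque hash_map_t handle; a C-level round trip, using only functions declared in hash_map.h further down in this patch, looks like this (usage sketch):

#include "hash_map.h"

static void hash_map_roundtrip(void) {
  hash_map_t h = hash_map_init();
  long val = 0;
  if (hash_map_put(h, 42L, 7L) &&   /* returns 0 on failure */
      hash_map_get(h, 42L, &val)) {
    /* val == 7 here; hash_map_get returns 0 for a missing key */
  }
  hash_map_destroy(h);
}
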
autounlock(h); - } - if (!ret) - return LUA_HANDLE_ERROR_STR(L, "failed to put into hash map"); - return 0; -} - -int hash_map_fill_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - int ret; - if (lua_isinteger(L, 2)) { - long key = lua_tointeger(L, 2); - autolock(h); - ret = hash_map_fill(h->h, key, &h->counter); - autounlock(h); - } - else { - THLongTensor *keys = (THLongTensor *)luaT_checkudata(L, 2, "torch.LongTensor"); - check_tensor(L, keys, THLongTensor); - autolock(h); - ret = hash_map_fill_tensor(h->h, keys, &h->counter); - autounlock(h); - } - if (!ret) - return LUA_HANDLE_ERROR_STR(L, "failed to fill into hash map"); - return 0; -} - -int hash_map_adjust_counter_lua(lua_State *L) { - hash_map_lua_t *h_ = *(hash_map_lua_t**)lua_touserdata(L, 1); - internal_hash_map_t h = (internal_hash_map_t) h_->h; - - long val; - kh_foreach_value(h, val, { - if (val >= h_->counter) - h_->counter = val; - }); - return 0; -} - -int hash_map_set_counter_lua(lua_State *L) { - hash_map_lua_t *h_ = *(hash_map_lua_t**)lua_touserdata(L, 1); - h_->counter = lua_tointeger(L, 2); - return 0; -} - -int hash_map_get_counter_lua(lua_State *L) { - hash_map_lua_t *h_ = *(hash_map_lua_t**)lua_touserdata(L, 1); - lua_pushinteger(L, h_->counter); - return 1; -} - -static int hash_map_get_tensor_lua(lua_State *L, hash_map_lua_t *h, int inplace) { - THLongTensor *keys = (THLongTensor *)luaT_checkudata(L, 2, "torch.LongTensor"); - check_tensor(L, keys, THLongTensor); - THLongTensor *vals = inplace ? keys : NULL; - THByteTensor *mask = NULL; - - int maskIdx = inplace ? 3 : 4; - - if (!inplace) { - if (lua_gettop(L) < 3) { - vals = THLongTensor_new(); - } else { - vals = (THLongTensor *)luaT_checkudata(L, 3, "torch.LongTensor"); - check_tensor(L, vals, THLongTensor); - } - } - - if (lua_gettop(L) < maskIdx) { - mask = THByteTensor_new(); - } else { - mask = (THByteTensor *)luaT_checkudata(L, maskIdx, "torch.ByteTensor"); - check_tensor(L, mask, THByteTensor); - } - - int n_dim = THLongTensor_nDimension(keys); - THLongStorage *st = THLongStorage_newWithSize1(n_dim); - for (int i = 0; i < n_dim; i++) { - THLongStorage_set(st, i, THLongTensor_size(keys, i)); - } - THByteTensor_resize(mask, st, NULL); - if (!inplace) THLongTensor_resize(vals, st, NULL); - THLongStorage_free(st); - - autolock(h); - hash_map_get_tensor(h->h, keys, vals, mask); - autounlock(h); - - if (!inplace && lua_gettop(L) < 3) - luaT_pushudata(L, vals, "torch.LongTensor"); - if (lua_gettop(L) < maskIdx) - luaT_pushudata(L, mask, "torch.ByteTensor"); - - return 2; -} - -static int hash_map_get_table_lua(lua_State *L, hash_map_lua_t *h, int inplace) { - const int kidx = 2; - const int vidx = inplace ? 2 : 3; - const int midx = inplace ? 3 : 4; - const int narg = lua_gettop(L); - - if (inplace) { - if (narg < 3) { - LUA_HANDLE_ERROR_STR(L, "HashMap.getInplace requires two arguments."); - } - } else { - if (narg < 4) { - LUA_HANDLE_ERROR_STR(L, "HashMap.get requires three arguments."); - } - } - - int count = push_table_contents(L, kidx); - verify_push_table_contents(L, vidx, count); - verify_push_table_contents(L, midx, count); - - THLongTensor *keys; - THLongTensor *vals; - THByteTensor *mask; - for (int i = count - 1; i >= 0; i--) { - int maskIdx = i - count; - int valIdx = maskIdx - count; - int keyIdx = inplace ? 
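
hash_map_init_lua above deliberately creates the mutex as PTHREAD_MUTEX_RECURSIVE, so nested autolock()/autounlock() pairs (a locked bulk call invoking a locked single-key call) do not deadlock. The setup pattern in isolation:

#include <pthread.h>

/* Sketch of the recursive-mutex initialization used above. */
static void init_recursive_mutex(pthread_mutex_t *m) {
  pthread_mutexattr_t attr;
  pthread_mutexattr_init(&attr);
  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(m, &attr);
  pthread_mutexattr_destroy(&attr);   /* attr no longer needed */
}
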
valIdx : (valIdx - count); - - keys = (THLongTensor *)luaT_checkudata(L, keyIdx, "torch.LongTensor"); - check_tensor(L, keys, THLongTensor); - if (inplace) { - vals = keys; - } else { - vals = (THLongTensor *)luaT_checkudata(L, valIdx, "torch.LongTensor"); - } - mask = (THByteTensor *)luaT_checkudata(L, maskIdx, "torch.ByteTensor"); - - int n_dim = THLongTensor_nDimension(keys); - THLongStorage *st = THLongStorage_newWithSize1(n_dim); - for (int i = 0; i < n_dim; i++) { - THLongStorage_set(st, i, THLongTensor_size(keys, i)); - } - THByteTensor_resize(mask, st, NULL); - THLongTensor_resize(vals, st, NULL); - THLongStorage_free(st); - - autolock(h); - hash_map_get_tensor(h->h, keys, vals, mask); - autounlock(h); - } - lua_pop(L, (narg - 1) * count); - return 2; -} - -int hash_map_get_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - if (lua_isinteger(L, 2)) { - long key = lua_tointeger(L, 2); - long val; - autolock(h); - int ret = hash_map_get(h->h, key, &val); - autounlock(h); - if (ret) { - lua_pushinteger(L, val); - lua_pushinteger(L, 1); - } - else { - lua_pushinteger(L, 0); - lua_pushinteger(L, 0); - } - } else if (lua_istable(L, 2)) { - return hash_map_get_table_lua(L, h, 0); - } else { - return hash_map_get_tensor_lua(L, h, 0); - } - return 2; -} - -int hash_map_get_inplace_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - if (lua_isinteger(L, 2)) { - LUA_HANDLE_ERROR_STR(L, "HashMap.getInplace does not support integer arguments."); - } else if (lua_istable(L, 2)) { - return hash_map_get_table_lua(L, h, 1); - } else { - return hash_map_get_tensor_lua(L, h, 1); - } - return 2; -} - -int hash_map_del_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - if (lua_isinteger(L, 2)) { - long key = lua_tointeger(L, 2); - autolock(h); - hash_map_del(h->h, key); - autounlock(h); - } - else { - THLongTensor *keys = (THLongTensor *)luaT_checkudata(L, 2, "torch.LongTensor"); - autolock(h); - hash_map_del_tensor(h->h, keys); - autounlock(h); - } - return 0; -} - -int hash_map_size_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - long size = hash_map_size(h->h); - lua_pushinteger(L, size); - return 1; -} - -int hash_map_to_tensor_lua(lua_State *L) { - hash_map_lua_t *h = *(hash_map_lua_t**)lua_touserdata(L, 1); - THLongTensor *keys, *vals; - - if (lua_gettop(L) < 2) { - keys = THLongTensor_new(); - } - else { - keys = (THLongTensor *)luaT_checkudata(L, 2, "torch.LongTensor"); - check_tensor(L, keys, THLongTensor); - } - - if (lua_gettop(L) < 3) { - vals = THLongTensor_new(); - } - else { - vals = (THLongTensor *)luaT_checkudata(L, 3, "torch.LongTensor"); - check_tensor(L, vals, THLongTensor); - } - - size_t size = hash_map_size(h->h); - THLongTensor_resize1d(keys, size); - THLongTensor_resize1d(vals, size); - - autolock(h); - hash_map_to_tensor(h->h, keys, vals); - autounlock(h); - - if (lua_gettop(L) < 2) - luaT_pushudata(L, keys, "torch.LongTensor"); - if (lua_gettop(L) < 3) - luaT_pushudata(L, vals, "torch.LongTensor"); - return 2; -} diff --git a/contrib/lua-torch/decisiontree/hash_map.h b/contrib/lua-torch/decisiontree/hash_map.h deleted file mode 100644 index 5b215e4cac..0000000000 --- a/contrib/lua-torch/decisiontree/hash_map.h +++ /dev/null @@ -1,36 +0,0 @@ -#include "luaT.h" -#include "TH.h" - -typedef void* hash_map_t; - -hash_map_t hash_map_init(void); -void hash_map_destroy(hash_map_t); -void hash_map_clear(hash_map_t); -int hash_map_put(hash_map_t, 
long key, long val); -int hash_map_put_tensor(hash_map_t, THLongTensor *keys_, THLongTensor *vals_); -int hash_map_fill(hash_map_t, long key, long *counter); -int hash_map_fill_tensor(hash_map_t, THLongTensor *keys_, long *counter); -int hash_map_get(hash_map_t, long key, long *val); -void hash_map_get_tensor(hash_map_t, THLongTensor *keys_, THLongTensor *vals_, THByteTensor *mask_); -void hash_map_del(hash_map_t, long key); -void hash_map_del_tensor(hash_map_t, THLongTensor *keys_); -size_t hash_map_size(hash_map_t); -void hash_map_to_tensor(hash_map_t, THLongTensor *keys_, THLongTensor *vals_); - -int hash_map_autolock_on_lua(lua_State *L); -int hash_map_autolock_off_lua(lua_State *L); -int hash_map_init_lua(lua_State *L); -int hash_map_gc_lua(lua_State *L); -int hash_map_retain_lua(lua_State *L); -int hash_map_metatablename_lua(lua_State *L); -int hash_map_clear_lua(lua_State *L); -int hash_map_put_lua(lua_State *L); -int hash_map_fill_lua(lua_State *L); -int hash_map_adjust_counter_lua(lua_State *L); -int hash_map_set_counter_lua(lua_State *L); -int hash_map_get_counter_lua(lua_State *L); -int hash_map_get_lua(lua_State *L); -int hash_map_get_inplace_lua(lua_State *L); -int hash_map_del_lua(lua_State *L); -int hash_map_size_lua(lua_State *L); -int hash_map_to_tensor_lua(lua_State *L); diff --git a/contrib/lua-torch/decisiontree/init.c b/contrib/lua-torch/decisiontree/init.c deleted file mode 100644 index 276241e8e0..0000000000 --- a/contrib/lua-torch/decisiontree/init.c +++ /dev/null @@ -1,77 +0,0 @@ -#include "TH.h" -#include "luaT.h" - -#ifdef _OPENMP -#include "omp.h" -#endif - -#include "error.h" -#include "hash_map.h" - -#define torch_(NAME) TH_CONCAT_3(torch_, Real, NAME) -#define torch_Tensor TH_CONCAT_STRING_3(torch., Real, Tensor) -#define nn_(NAME) TH_CONCAT_3(nn_, Real, NAME) - -#include "generic/LogitBoostCriterion.c" -#include "THGenerateFloatTypes.h" - -#include "generic/DFD.c" -#include "THGenerateFloatTypes.h" - -#include "generic/S2D.c" -#include "THGenerateFloatTypes.h" - -#include "generic/CartTree.c" -#include "THGenerateFloatTypes.h" - -#include "GBDT_common.h" -#include "generic/GBDT.c" -#include "THGenerateFloatTypes.h" - -static const struct luaL_Reg decisiontree_hash_map_routines[] = { - {"__gc", hash_map_gc_lua}, - {"retain", hash_map_retain_lua}, - {"metatablename", hash_map_metatablename_lua}, - {"clear", hash_map_clear_lua}, - {"put", hash_map_put_lua}, - {"fill", hash_map_fill_lua}, - {"adjustCounter", hash_map_adjust_counter_lua}, - {"getCounter", hash_map_get_counter_lua}, - {"setCounter", hash_map_set_counter_lua}, - {"get", hash_map_get_lua}, - {"getInplace", hash_map_get_inplace_lua}, - {"del", hash_map_del_lua}, - {"size", hash_map_size_lua}, - {"safe", hash_map_autolock_on_lua}, - {"unsafe", hash_map_autolock_off_lua}, - {"toTensors", hash_map_to_tensor_lua}, - {"new", hash_map_init_lua}, - {NULL, NULL} -}; - -DLL_EXPORT int luaopen_libdecisiontree(lua_State *L) -{ - // HashMap - luaL_newmetatable(L, "dt.HashMap"); - lua_pushstring(L, "__index"); - lua_pushvalue(L, -2); - lua_settable(L, -3); - luaT_setfuncs(L, decisiontree_hash_map_routines, 0); - - nn_FloatLogitBoostCriterion_init(L); - nn_DoubleLogitBoostCriterion_init(L); - - nn_FloatDFD_init(L); - nn_DoubleDFD_init(L); - - nn_FloatS2D_init(L); - nn_DoubleS2D_init(L); - - nn_FloatCT_init(L); - nn_DoubleCT_init(L); - - nn_FloatGBDT_init(L); - nn_DoubleGBDT_init(L); - - return 1; -} diff --git a/contrib/lua-torch/decisiontree/init.lua b/contrib/lua-torch/decisiontree/init.lua deleted file mode 
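
luaopen_libdecisiontree above uses the standard luaT wiring: create the dt.HashMap metatable, point its __index at itself so methods resolve on userdata instances, then register the C routines. Schematically (same calls, reduced to the pattern):

/* Sketch of the metatable setup in luaopen_libdecisiontree above. */
static void register_methods(lua_State *L, const luaL_Reg *fns) {
  luaL_newmetatable(L, "dt.HashMap"); /* registry["dt.HashMap"] = mt */
  lua_pushstring(L, "__index");
  lua_pushvalue(L, -2);               /* duplicate mt */
  lua_settable(L, -3);                /* mt.__index = mt */
  luaT_setfuncs(L, fns, 0);           /* attach the method table */
}
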
100644 index 26f790b605..0000000000 --- a/contrib/lua-torch/decisiontree/init.lua +++ /dev/null @@ -1,70 +0,0 @@ -require 'paths' ---require 'xlua' -require 'string' -require 'os' ---require 'sys' -require 'nn' - --- these actually return local variables but we will re-require them --- when needed. This is just to make sure they are loaded. -require 'moses' - -unpack = unpack or table.unpack - -local dt = require 'decisiontree._env' - --- c lib: -require "paths" -paths.require 'libdecisiontree' - -dt.HashMap = torch.getmetatable("dt.HashMap").new - -dt.EPSILON = 1e-6 - --- experimental Tensor-like container -require 'decisiontree.SparseTensor' - --- functions -require 'decisiontree.math' -require 'decisiontree.utils' - --- for multi-threading ---require 'decisiontree.WorkPool' - --- abstract classes -require 'decisiontree.DecisionTree' -require 'decisiontree.DecisionForest' -require 'decisiontree.DecisionForestTrainer' -require 'decisiontree.TreeState' - --- for CaRTree inference -require 'decisiontree.CartNode' -require 'decisiontree.CartTree' - --- Criterions (extended with updateHessInput and backward2) -require 'decisiontree.MSECriterion' -require 'decisiontree.LogitBoostCriterion' - --- Used by both RandomForestTrainer and GradientBoostTrainer -require 'decisiontree.CartTrainer' - --- Used by CartTrainer -require 'decisiontree.DataSet' - --- Random Forest Training -require 'decisiontree.RandomForestTrainer' -require 'decisiontree.GiniState' -- TreeState subclass - --- Gradient Boosted Decision Tree Training -require 'decisiontree.GradientBoostTrainer' -require 'decisiontree.GradientBoostState' -- TreeState subclass - --- unit tests and benchmarks ---require 'decisiontree.test' ---require 'decisiontree.benchmark' - --- nn.Module -require 'decisiontree.DFD' -require 'decisiontree.Sparse2Dense' - -return dt diff --git a/contrib/lua-torch/decisiontree/internal_hash_map.h b/contrib/lua-torch/decisiontree/internal_hash_map.h deleted file mode 100644 index bc8c523ef8..0000000000 --- a/contrib/lua-torch/decisiontree/internal_hash_map.h +++ /dev/null @@ -1,13 +0,0 @@ -#include "khash.h" -#include - -KHASH_MAP_INIT_INT64(long, long) -typedef khash_t(long)* internal_hash_map_t; - -typedef struct { - hash_map_t h; - int refcount; - pthread_mutex_t mutex; - int autolock; - long counter; -} hash_map_lua_t; diff --git a/contrib/lua-torch/decisiontree/khash.h b/contrib/lua-torch/decisiontree/khash.h deleted file mode 100644 index 20e9940635..0000000000 --- a/contrib/lua-torch/decisiontree/khash.h +++ /dev/null @@ -1,627 +0,0 @@ -/* The MIT License - - Copyright (c) 2008, 2009, 2011 by Attractive Chaos - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. -*/ - -/* - An example: - -#include "khash.h" -KHASH_MAP_INIT_INT(32, char) -int main() { - int ret, is_missing; - khiter_t k; - khash_t(32) *h = kh_init(32); - k = kh_put(32, h, 5, &ret); - kh_value(h, k) = 10; - k = kh_get(32, h, 10); - is_missing = (k == kh_end(h)); - k = kh_get(32, h, 5); - kh_del(32, h, k); - for (k = kh_begin(h); k != kh_end(h); ++k) - if (kh_exist(h, k)) kh_value(h, k) = 1; - kh_destroy(32, h); - return 0; -} -*/ - -/* - 2013-05-02 (0.2.8): - - * Use quadratic probing. When the capacity is power of 2, stepping function - i*(i+1)/2 guarantees to traverse each bucket. It is better than double - hashing on cache performance and is more robust than linear probing. - - In theory, double hashing should be more robust than quadratic probing. - However, my implementation is probably not for large hash tables, because - the second hash function is closely tied to the first hash function, - which reduce the effectiveness of double hashing. - - Reference: http://research.cs.vt.edu/AVresearch/hashing/quadratic.php - - 2011-12-29 (0.2.7): - - * Minor code clean up; no actual effect. - - 2011-09-16 (0.2.6): - - * The capacity is a power of 2. This seems to dramatically improve the - speed for simple keys. Thank Zilong Tan for the suggestion. Reference: - - - http://code.google.com/p/ulib/ - - http://nothings.org/computer/judy/ - - * Allow to optionally use linear probing which usually has better - performance for random input. Double hashing is still the default as it - is more robust to certain non-random input. - - * Added Wang's integer hash function (not used by default). This hash - function is more robust to certain non-random input. - - 2011-02-14 (0.2.5): - - * Allow to declare global functions. - - 2009-09-26 (0.2.4): - - * Improve portability - - 2008-09-19 (0.2.3): - - * Corrected the example - * Improved interfaces - - 2008-09-11 (0.2.2): - - * Improved speed a little in kh_put() - - 2008-09-10 (0.2.1): - - * Added kh_clear() - * Fixed a compiling error - - 2008-09-02 (0.2.0): - - * Changed to token concatenation which increases flexibility. - - 2008-08-31 (0.1.2): - - * Fixed a bug in kh_get(), which has not been tested previously. - - 2008-08-31 (0.1.1): - - * Added destructor -*/ - - -#ifndef __AC_KHASH_H -#define __AC_KHASH_H - -/*! - @header - - Generic hash table library. 
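
The 0.2.8 note above leans on a known property: with n = 2^k buckets, probing at triangular offsets i + s(s+1)/2 (mod n) visits every bucket exactly once. A quick empirical check of that claim (standalone, not part of khash):

#include <stdio.h>

int main(void) {
  unsigned n = 16, mask = n - 1, seen = 0, i = 5; /* arbitrary start */
  for (unsigned step = 0; step <= mask; step++) {
    seen |= 1u << i;              /* record the visited bucket */
    i = (i + step + 1) & mask;    /* same stepping as kh_get/kh_put */
  }
  printf("all buckets visited: %s\n", seen == 0xFFFFu ? "yes" : "no");
  return 0;
}
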
- */ - -#define AC_VERSION_KHASH_H "0.2.8" - -#include -#include -#include - -/* compiler specific configuration */ - -#if UINT_MAX == 0xffffffffu -typedef unsigned int khint32_t; -#elif ULONG_MAX == 0xffffffffu -typedef unsigned long khint32_t; -#endif - -#if ULONG_MAX == ULLONG_MAX -typedef unsigned long khint64_t; -#else -typedef unsigned long long khint64_t; -#endif - -#ifndef kh_inline -#ifdef _MSC_VER -#define kh_inline __inline -#else -#define kh_inline inline -#endif -#endif /* kh_inline */ - -#ifndef klib_unused -#if (defined __clang__ && __clang_major__ >= 3) || (defined __GNUC__ && __GNUC__ >= 3) -#define klib_unused __attribute__ ((__unused__)) -#else -#define klib_unused -#endif -#endif /* klib_unused */ - -typedef khint32_t khint_t; -typedef khint_t khiter_t; - -#define __ac_isempty(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&2) -#define __ac_isdel(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&1) -#define __ac_iseither(flag, i) ((flag[i>>4]>>((i&0xfU)<<1))&3) -#define __ac_set_isdel_false(flag, i) (flag[i>>4]&=~(1ul<<((i&0xfU)<<1))) -#define __ac_set_isempty_false(flag, i) (flag[i>>4]&=~(2ul<<((i&0xfU)<<1))) -#define __ac_set_isboth_false(flag, i) (flag[i>>4]&=~(3ul<<((i&0xfU)<<1))) -#define __ac_set_isdel_true(flag, i) (flag[i>>4]|=1ul<<((i&0xfU)<<1)) - -#define __ac_fsize(m) ((m) < 16? 1 : (m)>>4) - -#ifndef kroundup32 -#define kroundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x)) -#endif - -#ifndef kcalloc -#define kcalloc(N,Z) calloc(N,Z) -#endif -#ifndef kmalloc -#define kmalloc(Z) malloc(Z) -#endif -#ifndef krealloc -#define krealloc(P,Z) realloc(P,Z) -#endif -#ifndef kfree -#define kfree(P) free(P) -#endif - -static const double __ac_HASH_UPPER = 0.77; - -#define __KHASH_TYPE(name, khkey_t, khval_t) \ - typedef struct kh_##name##_s { \ - khint_t n_buckets, size, n_occupied, upper_bound; \ - khint32_t *flags; \ - khkey_t *keys; \ - khval_t *vals; \ - } kh_##name##_t; - -#define __KHASH_PROTOTYPES(name, khkey_t, khval_t) \ - extern kh_##name##_t *kh_init_##name(void); \ - extern void kh_destroy_##name(kh_##name##_t *h); \ - extern void kh_clear_##name(kh_##name##_t *h); \ - extern khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key); \ - extern int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets); \ - extern khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret); \ - extern void kh_del_##name(kh_##name##_t *h, khint_t x); - -#define __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ - SCOPE kh_##name##_t *kh_init_##name(void) { \ - return (kh_##name##_t*)kcalloc(1, sizeof(kh_##name##_t)); \ - } \ - SCOPE void kh_destroy_##name(kh_##name##_t *h) \ - { \ - if (h) { \ - kfree((void *)h->keys); kfree(h->flags); \ - kfree((void *)h->vals); \ - kfree(h); \ - } \ - } \ - SCOPE void kh_clear_##name(kh_##name##_t *h) \ - { \ - if (h && h->flags) { \ - memset(h->flags, 0xaa, __ac_fsize(h->n_buckets) * sizeof(khint32_t)); \ - h->size = h->n_occupied = 0; \ - } \ - } \ - SCOPE khint_t kh_get_##name(const kh_##name##_t *h, khkey_t key) \ - { \ - if (h->n_buckets) { \ - khint_t k, i, last, mask, step = 0; \ - mask = h->n_buckets - 1; \ - k = __hash_func(key); i = k & mask; \ - last = i; \ - while (!__ac_isempty(h->flags, i) && (__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \ - i = (i + (++step)) & mask; \ - if (i == last) return h->n_buckets; \ - } \ - return __ac_iseither(h->flags, i)? 
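
The __ac_* macros used by kh_get above pack two metadata bits per bucket (empty and deleted), sixteen buckets per khint32_t word: bucket i lives in word i>>4 at bit offset (i & 0xf) << 1. A small addressing demo (note khash itself memsets fresh flags to 0xaa so every bucket starts out empty):

#include <stdio.h>

int main(void) {
  unsigned flags[2] = {0, 0};
  unsigned i = 21;                              /* bucket index */
  flags[i >> 4] |= 1u << ((i & 0xFu) << 1);     /* __ac_set_isdel_true */
  int isdel = (flags[i >> 4] >> ((i & 0xFu) << 1)) & 1; /* __ac_isdel */
  printf("bucket %u deleted: %d\n", i, isdel);
  return 0;
}
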
h->n_buckets : i; \ - } else return 0; \ - } \ - SCOPE int kh_resize_##name(kh_##name##_t *h, khint_t new_n_buckets) \ - { /* This function uses 0.25*n_buckets bytes of working space instead of [sizeof(key_t+val_t)+.25]*n_buckets. */ \ - khint32_t *new_flags = 0; \ - khint_t j = 1; \ - { \ - kroundup32(new_n_buckets); \ - if (new_n_buckets < 4) new_n_buckets = 4; \ - if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; /* requested size is too small */ \ - else { /* hash table size to be changed (shrink or expand); rehash */ \ - new_flags = (khint32_t*)kmalloc(__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \ - if (!new_flags) return -1; \ - memset(new_flags, 0xaa, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \ - if (h->n_buckets < new_n_buckets) { /* expand */ \ - khkey_t *new_keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \ - if (!new_keys) { kfree(new_flags); return -1; } \ - h->keys = new_keys; \ - if (kh_is_map) { \ - khval_t *new_vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \ - if (!new_vals) { kfree(new_flags); return -1; } \ - h->vals = new_vals; \ - } \ - } /* otherwise shrink */ \ - } \ - } \ - if (j) { /* rehashing is needed */ \ - for (j = 0; j != h->n_buckets; ++j) { \ - if (__ac_iseither(h->flags, j) == 0) { \ - khkey_t key = h->keys[j]; \ - khval_t val; \ - khint_t new_mask; \ - new_mask = new_n_buckets - 1; \ - if (kh_is_map) val = h->vals[j]; \ - __ac_set_isdel_true(h->flags, j); \ - while (1) { /* kick-out process; sort of like in Cuckoo hashing */ \ - khint_t k, i, step = 0; \ - k = __hash_func(key); \ - i = k & new_mask; \ - while (!__ac_isempty(new_flags, i)) i = (i + (++step)) & new_mask; \ - __ac_set_isempty_false(new_flags, i); \ - if (i < h->n_buckets && __ac_iseither(h->flags, i) == 0) { /* kick out the existing element */ \ - { khkey_t tmp = h->keys[i]; h->keys[i] = key; key = tmp; } \ - if (kh_is_map) { khval_t tmp = h->vals[i]; h->vals[i] = val; val = tmp; } \ - __ac_set_isdel_true(h->flags, i); /* mark it as deleted in the old hash table */ \ - } else { /* write the element and jump out of the loop */ \ - h->keys[i] = key; \ - if (kh_is_map) h->vals[i] = val; \ - break; \ - } \ - } \ - } \ - } \ - if (h->n_buckets > new_n_buckets) { /* shrink the hash table */ \ - h->keys = (khkey_t*)krealloc((void *)h->keys, new_n_buckets * sizeof(khkey_t)); \ - if (kh_is_map) h->vals = (khval_t*)krealloc((void *)h->vals, new_n_buckets * sizeof(khval_t)); \ - } \ - kfree(h->flags); /* free the working space */ \ - h->flags = new_flags; \ - h->n_buckets = new_n_buckets; \ - h->n_occupied = h->size; \ - h->upper_bound = (khint_t)(h->n_buckets * __ac_HASH_UPPER + 0.5); \ - } \ - return 0; \ - } \ - SCOPE khint_t kh_put_##name(kh_##name##_t *h, khkey_t key, int *ret) \ - { \ - khint_t x; \ - if (h->n_occupied >= h->upper_bound) { /* update the hash table */ \ - if (h->n_buckets > (h->size<<1)) { \ - if (kh_resize_##name(h, h->n_buckets - 1) < 0) { /* clear "deleted" elements */ \ - *ret = -1; return h->n_buckets; \ - } \ - } else if (kh_resize_##name(h, h->n_buckets + 1) < 0) { /* expand the hash table */ \ - *ret = -1; return h->n_buckets; \ - } \ - } /* TODO: to implement automatically shrinking; resize() already support shrinking */ \ - { \ - khint_t k, i, site, last, mask = h->n_buckets - 1, step = 0; \ - x = site = h->n_buckets; k = __hash_func(key); i = k & mask; \ - if (__ac_isempty(h->flags, i)) x = i; /* for speed up */ \ - else { \ - last = i; \ - while (!__ac_isempty(h->flags, i) && 
(__ac_isdel(h->flags, i) || !__hash_equal(h->keys[i], key))) { \ - if (__ac_isdel(h->flags, i)) site = i; \ - i = (i + (++step)) & mask; \ - if (i == last) { x = site; break; } \ - } \ - if (x == h->n_buckets) { \ - if (__ac_isempty(h->flags, i) && site != h->n_buckets) x = site; \ - else x = i; \ - } \ - } \ - } \ - if (__ac_isempty(h->flags, x)) { /* not present at all */ \ - h->keys[x] = key; \ - __ac_set_isboth_false(h->flags, x); \ - ++h->size; ++h->n_occupied; \ - *ret = 1; \ - } else if (__ac_isdel(h->flags, x)) { /* deleted */ \ - h->keys[x] = key; \ - __ac_set_isboth_false(h->flags, x); \ - ++h->size; \ - *ret = 2; \ - } else *ret = 0; /* Don't touch h->keys[x] if present and not deleted */ \ - return x; \ - } \ - SCOPE void kh_del_##name(kh_##name##_t *h, khint_t x) \ - { \ - if (x != h->n_buckets && !__ac_iseither(h->flags, x)) { \ - __ac_set_isdel_true(h->flags, x); \ - --h->size; \ - } \ - } - -#define KHASH_DECLARE(name, khkey_t, khval_t) \ - __KHASH_TYPE(name, khkey_t, khval_t) \ - __KHASH_PROTOTYPES(name, khkey_t, khval_t) - -#define KHASH_INIT2(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ - __KHASH_TYPE(name, khkey_t, khval_t) \ - __KHASH_IMPL(name, SCOPE, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) - -#define KHASH_INIT(name, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) \ - KHASH_INIT2(name, static kh_inline klib_unused, khkey_t, khval_t, kh_is_map, __hash_func, __hash_equal) - -/* --- BEGIN OF HASH FUNCTIONS --- */ - -/*! @function - @abstract Integer hash function - @param key The integer [khint32_t] - @return The hash value [khint_t] - */ -#define kh_int_hash_func(key) (khint32_t)(key) -/*! @function - @abstract Integer comparison function - */ -#define kh_int_hash_equal(a, b) ((a) == (b)) -/*! @function - @abstract 64-bit integer hash function - @param key The integer [khint64_t] - @return The hash value [khint_t] - */ -#define kh_int64_hash_func(key) (khint32_t)((key)>>33^(key)^(key)<<11) -/*! @function - @abstract 64-bit integer comparison function - */ -#define kh_int64_hash_equal(a, b) ((a) == (b)) -/*! @function - @abstract const char* hash function - @param s Pointer to a null terminated string - @return The hash value - */ -static kh_inline khint_t __ac_X31_hash_string(const char *s) -{ - khint_t h = (khint_t)*s; - if (h) for (++s ; *s; ++s) h = (h << 5) - h + (khint_t)*s; - return h; -} -/*! @function - @abstract Another interface to const char* hash function - @param key Pointer to a null terminated string [const char*] - @return The hash value [khint_t] - */ -#define kh_str_hash_func(key) __ac_X31_hash_string(key) -/*! @function - @abstract Const char* comparison function - */ -#define kh_str_hash_equal(a, b) (strcmp(a, b) == 0) - -static kh_inline khint_t __ac_Wang_hash(khint_t key) -{ - key += ~(key << 15); - key ^= (key >> 10); - key += (key << 3); - key ^= (key >> 6); - key += ~(key << 11); - key ^= (key >> 16); - return key; -} -#define kh_int_hash_func2(key) __ac_Wang_hash((khint_t)key) - -/* --- END OF HASH FUNCTIONS --- */ - -/* Other convenient macros... */ - -/*! - @abstract Type of the hash table. - @param name Name of the hash table [symbol] - */ -#define khash_t(name) kh_##name##_t - -/*! @function - @abstract Initiate a hash table. - @param name Name of the hash table [symbol] - @return Pointer to the hash table [khash_t(name)*] - */ -#define kh_init(name) kh_init_##name() - -/*! @function - @abstract Destroy a hash table. 
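
Against the KHASH_MAP_INIT_INT64(long, long) instantiation declared by internal_hash_map.h earlier in this patch, the macro API reads as below; this mirrors the example at the top of khash.h and is only a usage sketch:

#include "khash.h"
KHASH_MAP_INIT_INT64(long, long)    /* as in internal_hash_map.h */

static void khash_usage(void) {
  int ret;
  khash_t(long) *h = kh_init(long);
  khiter_t k = kh_put(long, h, 42, &ret); /* ret: -1 fail, 0 present,
                                             1 empty, 2 was deleted */
  kh_value(h, k) = 7;
  k = kh_get(long, h, 42);
  if (k != kh_end(h)) {
    long v = kh_value(h, k);  /* v == 7 */
    (void)v;
  }
  kh_destroy(long, h);
}
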
- @param name Name of the hash table [symbol] - @param h Pointer to the hash table [khash_t(name)*] - */ -#define kh_destroy(name, h) kh_destroy_##name(h) - -/*! @function - @abstract Reset a hash table without deallocating memory. - @param name Name of the hash table [symbol] - @param h Pointer to the hash table [khash_t(name)*] - */ -#define kh_clear(name, h) kh_clear_##name(h) - -/*! @function - @abstract Resize a hash table. - @param name Name of the hash table [symbol] - @param h Pointer to the hash table [khash_t(name)*] - @param s New size [khint_t] - */ -#define kh_resize(name, h, s) kh_resize_##name(h, s) - -/*! @function - @abstract Insert a key to the hash table. - @param name Name of the hash table [symbol] - @param h Pointer to the hash table [khash_t(name)*] - @param k Key [type of keys] - @param r Extra return code: -1 if the operation failed; - 0 if the key is present in the hash table; - 1 if the bucket is empty (never used); 2 if the element in - the bucket has been deleted [int*] - @return Iterator to the inserted element [khint_t] - */ -#define kh_put(name, h, k, r) kh_put_##name(h, k, r) - -/*! @function - @abstract Retrieve a key from the hash table. - @param name Name of the hash table [symbol] - @param h Pointer to the hash table [khash_t(name)*] - @param k Key [type of keys] - @return Iterator to the found element, or kh_end(h) if the element is absent [khint_t] - */ -#define kh_get(name, h, k) kh_get_##name(h, k) - -/*! @function - @abstract Remove a key from the hash table. - @param name Name of the hash table [symbol] - @param h Pointer to the hash table [khash_t(name)*] - @param k Iterator to the element to be deleted [khint_t] - */ -#define kh_del(name, h, k) kh_del_##name(h, k) - -/*! @function - @abstract Test whether a bucket contains data. - @param h Pointer to the hash table [khash_t(name)*] - @param x Iterator to the bucket [khint_t] - @return 1 if containing data; 0 otherwise [int] - */ -#define kh_exist(h, x) (!__ac_iseither((h)->flags, (x))) - -/*! @function - @abstract Get key given an iterator - @param h Pointer to the hash table [khash_t(name)*] - @param x Iterator to the bucket [khint_t] - @return Key [type of keys] - */ -#define kh_key(h, x) ((h)->keys[x]) - -/*! @function - @abstract Get value given an iterator - @param h Pointer to the hash table [khash_t(name)*] - @param x Iterator to the bucket [khint_t] - @return Value [type of values] - @discussion For hash sets, calling this results in segfault. - */ -#define kh_val(h, x) ((h)->vals[x]) - -/*! @function - @abstract Alias of kh_val() - */ -#define kh_value(h, x) ((h)->vals[x]) - -/*! @function - @abstract Get the start iterator - @param h Pointer to the hash table [khash_t(name)*] - @return The start iterator [khint_t] - */ -#define kh_begin(h) (khint_t)(0) - -/*! @function - @abstract Get the end iterator - @param h Pointer to the hash table [khash_t(name)*] - @return The end iterator [khint_t] - */ -#define kh_end(h) ((h)->n_buckets) - -/*! @function - @abstract Get the number of elements in the hash table - @param h Pointer to the hash table [khash_t(name)*] - @return Number of elements in the hash table [khint_t] - */ -#define kh_size(h) ((h)->size) - -/*! @function - @abstract Get the number of buckets in the hash table - @param h Pointer to the hash table [khash_t(name)*] - @return Number of buckets in the hash table [khint_t] - */ -#define kh_n_buckets(h) ((h)->n_buckets) - -/*! 
@function - @abstract Iterate over the entries in the hash table - @param h Pointer to the hash table [khash_t(name)*] - @param kvar Variable to which key will be assigned - @param vvar Variable to which value will be assigned - @param code Block of code to execute - */ -#define kh_foreach(h, kvar, vvar, code) { khint_t __i; \ - for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \ - if (!kh_exist(h,__i)) continue; \ - (kvar) = kh_key(h,__i); \ - (vvar) = kh_val(h,__i); \ - code; \ - } } - -/*! @function - @abstract Iterate over the values in the hash table - @param h Pointer to the hash table [khash_t(name)*] - @param vvar Variable to which value will be assigned - @param code Block of code to execute - */ -#define kh_foreach_value(h, vvar, code) { khint_t __i; \ - for (__i = kh_begin(h); __i != kh_end(h); ++__i) { \ - if (!kh_exist(h,__i)) continue; \ - (vvar) = kh_val(h,__i); \ - code; \ - } } - -/* More conenient interfaces */ - -/*! @function - @abstract Instantiate a hash set containing integer keys - @param name Name of the hash table [symbol] - */ -#define KHASH_SET_INIT_INT(name) \ - KHASH_INIT(name, khint32_t, char, 0, kh_int_hash_func, kh_int_hash_equal) - -/*! @function - @abstract Instantiate a hash map containing integer keys - @param name Name of the hash table [symbol] - @param khval_t Type of values [type] - */ -#define KHASH_MAP_INIT_INT(name, khval_t) \ - KHASH_INIT(name, khint32_t, khval_t, 1, kh_int_hash_func, kh_int_hash_equal) - -/*! @function - @abstract Instantiate a hash map containing 64-bit integer keys - @param name Name of the hash table [symbol] - */ -#define KHASH_SET_INIT_INT64(name) \ - KHASH_INIT(name, khint64_t, char, 0, kh_int64_hash_func, kh_int64_hash_equal) - -/*! @function - @abstract Instantiate a hash map containing 64-bit integer keys - @param name Name of the hash table [symbol] - @param khval_t Type of values [type] - */ -#define KHASH_MAP_INIT_INT64(name, khval_t) \ - KHASH_INIT(name, khint64_t, khval_t, 1, kh_int64_hash_func, kh_int64_hash_equal) - -typedef const char *kh_cstr_t; -/*! @function - @abstract Instantiate a hash map containing const char* keys - @param name Name of the hash table [symbol] - */ -#define KHASH_SET_INIT_STR(name) \ - KHASH_INIT(name, kh_cstr_t, char, 0, kh_str_hash_func, kh_str_hash_equal) - -/*! 
@function - @abstract Instantiate a hash map containing const char* keys - @param name Name of the hash table [symbol] - @param khval_t Type of values [type] - */ -#define KHASH_MAP_INIT_STR(name, khval_t) \ - KHASH_INIT(name, kh_cstr_t, khval_t, 1, kh_str_hash_func, kh_str_hash_equal) - -#endif /* __AC_KHASH_H */ diff --git a/contrib/lua-torch/decisiontree/math.lua b/contrib/lua-torch/decisiontree/math.lua deleted file mode 100644 index eb71b31edb..0000000000 --- a/contrib/lua-torch/decisiontree/math.lua +++ /dev/null @@ -1,84 +0,0 @@ -local dt = require "decisiontree._env" - -local PSEUDOCOUNT = 1.0 -local MIN_LOGISTIC = 1E-8 -local MAX_LOGISTIC = 1.0 - MIN_LOGISTIC - --- Create counts of possible results (last column of each row is the result) -function dt.uniquecounts(counts, inputset, nclass) - counts = counts or inputset.input.new() - nclass = nclass or inputset.target:max() - counts:resize(nclass):zero() - - inputset.target:apply(function(c) counts[c] = counts[c] + 1 end) - return counts -end - --- Entropy is the sum of -p(x)log(p(x)) across all the different possible results -local counts, logprobs -function dt.entropy(inputset, nclass) - local dt = require 'decisiontree' - counts = dt.uniquecounts(counts, inputset, nclass) - -- convert counts to categorical probabilities - counts:add(0.0000001) -- prevent NaN - counts:div(counts:sum()) - - logprobs = logprobs or counts.new() - logprobs:resize(counts:size()) - logprobs:log(counts):div(math.log(2)) -- log2(x) - - counts:cmul(logprobs) - - return -counts:sum() -end - --- Compute and return the probability of positive label. -function dt.probabilityPositive(nPositive, nTotal) - return (nPositive + PSEUDOCOUNT) / (nTotal + 2.0 * PSEUDOCOUNT); -end - --- Ref. https://en.wikipedia.org/wiki/Logit --- Calculates logit of the probability. --- Logit represents the log-odds. Probabilities transformed to logit 'space' can be combined linearly. -function dt.logit(p) - assert(p >= 0.0 and p <= 1.0, "Expecting probability for arg 1") - local truncatedP = math.max(MIN_LOGISTIC, math.min(MAX_LOGISTIC, p)) - return math.log(truncatedP / (1.0 - truncatedP)) -end - -function dt.logistic(x) - return (x >= 0) and (1 / (1 + math.exp(-x))) or (1 - 1 / (1 + math.exp(x))) -end - -function dt.computeGradientBoostLoss(gradient, hessian) - return -gradient * gradient / hessian -end - -function dt.computeNewtonScore(gradient, hessian) - return -0.5 * gradient / hessian; -end - --- Calculates the logit score for a Node in a Decision Tree based on the probability of a positive label. --- params: number of positive examples and total number of examples. -function dt.calculateLogitScore(nPositive, nTotal) - local dt = require 'decisiontree' - return dt.logit(dt.probabilityPositive(nPositive, nTotal)) -end - --- Compute and return the Gini impurity score based on an input contingency table. 
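
math.lua above clamps probabilities into [1e-8, 1 - 1e-8] before taking the log-odds, and scores tree leaves with the Newton step -0.5*g/h. The same arithmetic transcribed to C for reference (names are illustrative):

#include <math.h>

#define MIN_LOGISTIC 1e-8
#define MAX_LOGISTIC (1.0 - MIN_LOGISTIC)

/* log-odds of a clamped probability, as in dt.logit above */
static double logit(double p) {
  double q = fmax(MIN_LOGISTIC, fmin(MAX_LOGISTIC, p));
  return log(q / (1.0 - q));
}

/* leaf score from gradient/Hessian sums, as in dt.computeNewtonScore */
static double newton_score(double g, double h) {
  return -0.5 * g / h;
}
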
-function dt.computeGini(leftCount, positiveLeftCount, rightCount, positiveRightCount) - assert(torch.type(leftCount) == 'number', 'Expecting total number examples falling into leftBranch.') - assert(torch.type(positiveLeftCount) == 'number', 'Expecting total number of positive examples falling into left branch.') - assert(torch.type(rightCount) == 'number', 'Expecting total number of examples falling into the right branch.') - assert(torch.type(positiveRightCount) == 'number', 'Expecting total number of positive examples falling into the right branch.') - - local total = leftCount + rightCount - - local pPositiveLeft = leftCount == 0 and 0 or (positiveLeftCount / leftCount) - local leftGini = pPositiveLeft * (1.0 - pPositiveLeft) - - local pPositiveRight = rightCount == 0 and 0 or (positiveRightCount / rightCount) - local rightGini = pPositiveRight * (1.0 - pPositiveRight) - - return (leftCount * leftGini + rightCount * rightGini) / total -end \ No newline at end of file diff --git a/contrib/lua-torch/decisiontree/rocks/decisiontree-scm-1.rockspec b/contrib/lua-torch/decisiontree/rocks/decisiontree-scm-1.rockspec deleted file mode 100644 index d5d9162757..0000000000 --- a/contrib/lua-torch/decisiontree/rocks/decisiontree-scm-1.rockspec +++ /dev/null @@ -1,40 +0,0 @@ -package = "decisiontree" -version = "scm-1" - -source = { - url = "git://github.com/Twitter/decisiontree", - tag = "master" -} - -description = { - summary = "Decision trees for Torch by Twitter", - detailed = [[ - Classification and regression trees (CART). - Gradients boosted decision trees (GBDT). - ]], - homepage = "https://github.com/Twitter/decisiontree", - license = "BSD" -} - -dependencies = { - "torch >= 7.0", - "moses >= 1.3.1", - "xlua >= 1.0", - "image >= 1.0", - "luafilesystem >= 1.6.2", - "sys >= 1.1", - "paths >= 1.0", - "ipc >= 1.0", - "nn >= 1.0" -} - -build = { - type = "command", - build_command = [[ -cmake -E make_directory build; -cd build; -cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_PREFIX_PATH="$(LUA_BINDIR)/.." -DCMAKE_INSTALL_PREFIX="$(PREFIX)" -DCMAKE_C_FLAGS=-fPIC -DCMAKE_CXX_FLAGS=-fPIC; -$(MAKE) - ]], - install_command = "cd build && $(MAKE) install" -} diff --git a/contrib/lua-torch/decisiontree/test.lua b/contrib/lua-torch/decisiontree/test.lua deleted file mode 100644 index 80510a4f66..0000000000 --- a/contrib/lua-torch/decisiontree/test.lua +++ /dev/null @@ -1,817 +0,0 @@ -local dt = require "decisiontree._env" - -local dttest = {} -local nloop = 50 -local epsilon = 0.000001 -local mytester - ---e.g. usage: th -e "dt = require 'decisiontree'; dt.test()" - --- test 99% accuracy -local function testAccuracy(cartTree, name, dataset, minacc) - assert(torch.isTypeOf(dataset, 'dt.DataSet')) - minacc = minacc or 0.99 - local output = torch.Tensor(dataset:size()) - local target, input = dataset.target, dataset.input - - for i=1,dataset:size() do - local stack = {} - local score = cartTree:score(input[i], stack) - output[i] = score >= 0 and 1 or 0 - - if dt.VERBOSE and torch.type(cartTree) == 'dt.CartTree' and target[i] ~= output[i] then - print(cartTree:stackToString(stack, example.input)) - print(i, score, target[i], output[i]) - end - end - - local accuracy = torch.eq(target, output):float():mean() - mytester:assert(accuracy >= minacc, name .. ": insufficient accuracy: " .. accuracy .. " < " .. 
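
dt.computeGini above measures each branch's impurity as p*(1 - p) using the positive-example fraction, then weights the two branches by their sizes. Reduced to plain arithmetic (sketch):

/* Size-weighted Gini impurity of a binary split, mirroring
 * dt.computeGini; counts are assumed non-negative. */
static double gini_split(double nL, double posL, double nR, double posR) {
  double pL = (nL == 0.0) ? 0.0 : posL / nL;
  double pR = (nR == 0.0) ? 0.0 : posR / nR;
  return (nL * pL * (1.0 - pL) + nR * pR * (1.0 - pR)) / (nL + nR);
}
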
minacc) -end - -function dttest.SparseTensor() - local keys = torch.LongTensor{1,5,6,10} - local values = torch.randn(keys:size(1)) - local st = torch.SparseTensor(keys, values) - - mytester:assert(st[1] == values[1]) - mytester:assert(st[5] == values[2]) - mytester:assert(st[6] == values[3]) - mytester:assert(st[10] == values[4]) - - mytester:assert(st[2] == nil) - - st:buildIndex() - - mytester:assert(st[1] == values[1]) - mytester:assert(st[5] == values[2]) - mytester:assert(st[6] == values[3]) - mytester:assert(st[10] == values[4]) - - mytester:assert(st[2] == nil) - - -- test empty sparse tensor - - local est = torch.SparseTensor() -end - -function dttest.GiniState() - local featureId = 2 - local minLeafSize = 0 - - local input = torch.Tensor({{0,1,0},{0,2,0},{0,3,0}}) - local target = torch.Tensor({1, 1, 1}) - local dataset = dt.DataSet(input, target, 3) - - local splitInfo1 = {_id=1} - local splitInfo2 = {_id=2, leftChildSize = 1, rightChildSize = 2, splitGain = 0} - local splitInfo3 = {_id=3, leftChildSize = 2, rightChildSize = 1, splitGain = -1} - - local exampleIds = torch.LongTensor{1,2,3} - - local treeState = dt.GiniState(exampleIds) - - function treeState.computeSplitInfo(self, splitFeatureId, splitFeatureValue) - if splitFeatureId == featureId and splitFeatureValue == 2 then - return splitInfo2 - elseif splitFeatureId == featureId and splitFeatureValue == 3 then - return splitInfo3 - else - error("Unhandled computeSplitInfo call "..splitFeatureId.." "..splitFeatureValue) - end - end - - local splitInfo = treeState:findBestFeatureSplit(dataset, featureId, minLeafSize) - mytester:assert(splitInfo._id == splitInfo3._id) -end - -function dttest.CartTree() - - local splitFeatureId = 100 - local splitFeatureValue = 1.0 - - local function getBinaryCartTreeRootNode() - - local leftNodeScore = 0.2 - local rightNodeScore = 0.4 - - local rootNode = dt.CartNode() - rootNode.nodeId = 0 - rootNode.score = 0.5 - rootNode.splitFeatureId = splitFeatureId - rootNode.splitFeautreValue = splitFeatureValue - - local leftChild = dt.CartNode() - leftChild.score = leftNodeScore - leftChild.nodeId = 1 - - local rightChild = dt.CartNode() - rightChild.score = rightNodeScore - rightChild.nodeId = 2 - - rootNode.leftChild = leftChild - rootNode.rightChild = rightChild - - return rootNode - end - - local function testScoreCartTreeBranchLeftIfMissing() - local rootNode = getBinaryCartTreeRootNode() - - local cartTree = dt.CartTree(rootNode) - - local continuousFeatures = torch.SparseTensor() - - local score, nodeId = cartTree:score(continuousFeatures) - - mytester:assert(math.abs(rootNode.leftChild.score - score) < epsilon) - mytester:assert(rootNode.leftChild.nodeId == nodeId) - end - - local function testBranchRightWithFeature() - local rootNode = getBinaryCartTreeRootNode() - - local cartTree = dt.CartTree(rootNode) - - local continuousFeatures = torch.zeros(100) - continuousFeatures[splitFeatureId] = splitFeatureValue - - local score, nodeId = cartTree:score(continuousFeatures) - - mytester:assert(math.abs(rootNode.rightChild.score - score) < epsilon) - mytester:assert(rootNode.rightChild.nodeId == nodeId) - end - - local function testMissingRightNode() - local rootNode = getBinaryCartTreeRootNode() - - rootNode.rightChild = nil - - local cartTree = dt.CartTree(rootNode) - - local continuousFeatures = torch.Tensor() - - local score, nodeId = cartTree:score(continuousFeatures) - - mytester:assert(math.abs(rootNode.leftChild.score - score) < epsilon) - mytester:assert(rootNode.leftChild.nodeId == 
nodeId) - end - - local function testMissingLeftNode() - local rootNode = getBinaryCartTreeRootNode() - - rootNode.leftChild = nil - - local cartTree = dt.CartTree(rootNode) - - local continuousFeatures = torch.Tensor() - - local score, nodeId = cartTree:score(continuousFeatures) - - mytester:assert(math.abs(rootNode.rightChild.score - score) < epsilon) - mytester:assert(rootNode.rightChild.nodeId == nodeId) - end - - local function testMissingAllChildren() - local rootNode = getBinaryCartTreeRootNode() - - rootNode.leftChild = nil - rootNode.rightChild = nil - - local cartTree = dt.CartTree(rootNode) - - local continuousFeatures = torch.Tensor() - - local score, nodeId = cartTree:score(continuousFeatures) - - mytester:assert(math.abs(rootNode.score - score) < epsilon) - mytester:assert(rootNode.nodeId == nodeId) - end - - local function testScoreCartTreeBranchRandomlyRight() - local rootNode = getBinaryCartTreeRootNode(); - - -- Force Branch Right - local cartTree = dt.CartTree(rootNode, function() return false end); - - local continuousFeatures = torch.SparseTensor() - - local score, nodeId = cartTree:score(continuousFeatures) - - mytester:assert(math.abs(rootNode.rightChild.score - score) < epsilon) - mytester:assert(rootNode.rightChild.nodeId == nodeId) - end - - local function testScoreCartTreeBranchRandomlyLeft() - local rootNode = getBinaryCartTreeRootNode(); - - -- Force Branch Left - local cartTree = dt.CartTree(rootNode, function() return true end); - - local continuousFeatures = torch.SparseTensor() - - local score, nodeId = cartTree:score(continuousFeatures) - - mytester:assert(math.abs(rootNode.leftChild.score - score) < epsilon) - mytester:assert(rootNode.leftChild.nodeId == nodeId) - end - - testScoreCartTreeBranchLeftIfMissing() - testBranchRightWithFeature() - testMissingRightNode() - testMissingLeftNode() - testMissingAllChildren() - testScoreCartTreeBranchRandomlyRight() - testScoreCartTreeBranchRandomlyLeft() - -end - -function dttest.TreeState_branch() - local _ = require 'moses' - local binFeatureId = 1 - local featureId = 2 - - local input = { - torch.SparseTensor(torch.LongTensor{binFeatureId},torch.Tensor{1}), - torch.SparseTensor(torch.LongTensor{binFeatureId,featureId},torch.Tensor{1,1}), - torch.SparseTensor(torch.LongTensor{binFeatureId,featureId},torch.Tensor{0,2}), - torch.SparseTensor(torch.LongTensor{binFeatureId,featureId},torch.Tensor{0,3}) - } - local target = torch.LongTensor(4):fill(1) - - local dataset = dt.DataSet(input, target) - - local treeState = dt.TreeState(torch.LongTensor():range(1,4)) - local splitInfo = {splitId = binFeatureId, splitValue = 1} - - local function testBranchBinaryFeature() - splitInfo = {splitId = binFeatureId, splitValue = 1} - local leftBranch, rightBranch = treeState:branch(splitInfo, dataset) - mytester:assert(leftBranch ~= nil and rightBranch ~= nil) - - mytester:assert(2 == leftBranch:size()) - mytester:assert(leftBranch:contains(3)) - mytester:assert(leftBranch:contains(4)) - - mytester:assert(2 == rightBranch:size()) - mytester:assert(rightBranch:contains(1)) - mytester:assert(rightBranch:contains(2)) - end - - local function testBranchContinuousFeature() - local splitValue = 2 - splitInfo = {splitId = featureId, splitValue = splitValue} - - local leftBranch, rightBranch = treeState:branch(splitInfo, dataset) - mytester:assert(leftBranch ~= nil and rightBranch ~= nil) - - mytester:assert(1 == leftBranch:size()) - mytester:assert(leftBranch:contains(2)) - - mytester:assert(2 == rightBranch:size()) - 
mytester:assert(rightBranch:contains(3)) - mytester:assert(rightBranch:contains(4)) - end - - testBranchBinaryFeature() - testBranchContinuousFeature() - -end - -function dttest.DecisionForest() - -- Create test decision forest, each forest has only a single node, and returns score == score of root node. - - local function createCartTreeWithSingleNode(score) - local cartNode = dt.CartNode() - cartNode.score = score - return dt.CartTree(cartNode) - end - - local function getTestDecisionForest() - local cartTrees = { - createCartTreeWithSingleNode(1), - createCartTreeWithSingleNode(2), - createCartTreeWithSingleNode(3) - } - local weight = torch.Tensor{10,20,30} - local bias = 0.5 - - return dt.DecisionForest(cartTrees, weight, bias) - end - - local function testScoreDecisionForest() - local df = getTestDecisionForest() - local continuousFeatures = torch.SparseTensor() - - local expectedResult = 1.0 * 10.0 + 2.0 * 20.0 + 3.0 * 30.0 + 0.5; - local result = df:score(continuousFeatures) - - mytester:assert(math.abs(expectedResult - result) < epsilon) - end - - testScoreDecisionForest() -end - -function dttest.CartTrainer() - local minLeafSize, maxLeafNodes = 1, 1000 - local nExample = 100 - - -- 1. dense dataset - local trainSet, validSet, clusterExamples, inputs, targets = dt.getDenseDummyData(nExample) - - -- assert that the dataset is valid - for clusterId, exampleIds in ipairs(clusterExamples) do - local exampleIdx = torch.LongTensor(exampleIds) - local input = inputs:index(1,exampleIdx) - local target = targets:index(1,exampleIdx) - assert(input:std(1):mean() < 0.05) - end - - local cartTrainer = dt.CartTrainer(trainSet, minLeafSize, maxLeafNodes) - local treeState = dt.GiniState(trainSet:getExampleIds()) - local cartTree, nleaf = cartTrainer:train(treeState, trainSet.featureIds) - - mytester:assert(nleaf == nExample) -- for dense inputs, minLeafSize =1 and maxLeafNode = inf, this is true - testAccuracy(cartTree, "dense train single-thread first", trainSet, 0.99) - testAccuracy(cartTree, "dense valid single-thread first", validSet, 0.7) -- they don't generalize very well.. - - local cartTree, nleaf = cartTrainer:train(treeState, trainSet.featureIds) - testAccuracy(cartTree, "dense single-thread second", trainSet) - - -- test feature parallelization - local nThread = 2 - cartTrainer:featureParallel(nThread) - local treeState = dt.GiniState(trainSet:getExampleIds()) - local cartTree, nleaf = cartTrainer:train(treeState, trainSet.featureIds) - testAccuracy(cartTree, "dense feature-parallel", trainSet) - - -- 2. 
sparse-dense dataset
-   local trainSet, validSet, clusterExamples, inputs, targets = dt.getSparseDummyData(nExample, nil, 10, nil, nil, 10)
-
-   -- assert that the dataset is valid
-   for clusterId, exampleIds in ipairs(clusterExamples) do
-      local input = torch.Tensor(#exampleIds, 10):zero()
-      for i, exampleId in ipairs(exampleIds) do
-         input[i]:indexCopy(1, inputs[exampleId].keys, inputs[exampleId].values)
-      end
-      assert(input:std(1):mean() < 0.05)
-   end
-
-   local cartTrainer = dt.CartTrainer(trainSet, minLeafSize, maxLeafNodes)
-   local treeState = dt.GiniState(trainSet:getExampleIds())
-   local cartTree, nleaf = cartTrainer:train(treeState, trainSet.featureIds)
-
-   mytester:assert(nleaf == nExample) -- behaves like the dense case: minLeafSize = 1 and unbounded maxLeafNodes give one leaf per example
-   testAccuracy(cartTree, "sparse-dense train single-thread first", trainSet, 0.99)
-
-   local shuffle = torch.LongTensor():randperm(10)
-   for i, input in ipairs(inputs) do
-      input.keys = input.keys:index(1, shuffle)
-      input.values = input.values:index(1, shuffle)
-      input._map = nil
-   end
-   testAccuracy(cartTree, "sparse-dense shuffled keys train single-thread first", trainSet, 0.99)
-   testAccuracy(cartTree, "sparse-dense valid single-thread first", validSet, 0.8)
-
-   -- 3. sparse dataset
-   local trainSet, validSet = dt.getSparseDummyData(nExample, 2, 10, nil, nil, 9)
-
-   local cartTrainer = dt.CartTrainer(trainSet, minLeafSize, maxLeafNodes)
-   local treeState = dt.GiniState(trainSet:getExampleIds())
-   local cartTree, nleaf = cartTrainer:train(treeState, trainSet.featureIds)
-   cartTree.branchleft = function() return true end
-
-   mytester:assert(nleaf < nExample) -- with sparse inputs, examples lacking a split feature are dropped, so there are fewer leaves than examples
-   testAccuracy(cartTree, "sparse train single-thread first", trainSet, 0.9) -- the TreeBrancher drops examples with missing features, making it difficult to overfit
-   testAccuracy(cartTree, "sparse valid single-thread first", validSet, 0.8)
-end
-
-function dttest.GradientBoostTrainer()
-   local nExample = 100
-   local trainSet, validSet = dt.getSparseDummyData(nExample, 2, 10, nil, nil, 9)
-
-   local maxLeafNode, minLeafSize = nExample/2, nExample/10
-   local loss = nn.LogitBoostCriterion(false)
-
-   local cartTrainer = dt.CartTrainer(trainSet, minLeafSize, maxLeafNode)
-
-   local opt = {
-      lossFunction=loss,
-      treeTrainer=cartTrainer,
-      shrinkage=0.1,
-      downsampleRatio=6,
-      featureBaggingSize=-1,
-      nTree=14,
-      evalFreq=8,
-      earlyStop=0 -- no early-stopping
-   }
-
-   -- test single-thread
-   local trainer = dt.GradientBoostTrainer(opt)
-   local decisionForest = trainer:train(trainSet, trainSet.featureIds, validSet)
-
-   mytester:assert(#decisionForest.trees == opt.nTree)
-   testAccuracy(decisionForest, "sparse train single-thread first", trainSet, 0.98)
-   testAccuracy(decisionForest, "sparse valid single-thread first", validSet, 0.95)
-
-   -- test stateless
-   local decisionForest = trainer:train(trainSet, trainSet.featureIds, validSet)
-
-   mytester:assert(#decisionForest.trees == opt.nTree)
-   testAccuracy(decisionForest, "sparse train single-thread second", trainSet, 0.98)
-   testAccuracy(decisionForest, "sparse valid single-thread second", validSet, 0.95)
-
-   -- test feature-parallel
-   local nThread = 2
-   cartTrainer:featureParallel(nThread)
-
-   local trainer = dt.GradientBoostTrainer(opt)
-   local decisionForest = trainer:train(trainSet, trainSet.featureIds, validSet)
-
-   mytester:assert(#decisionForest.trees == opt.nTree)
-   testAccuracy(decisionForest, "sparse train feature-parallel first", trainSet, 0.98)
-   
testAccuracy(decisionForest, "sparse valid feature-parallel first", validSet, 0.95)
-
-end
-
-function dttest.RandomForestTrainer()
-   local nExample = 100
-   local trainSet, validSet = dt.getSparseDummyData(nExample, 2, 10, nil, nil, 9)
-
-   local opt = {
-      activeRatio=0.5,
-      featureBaggingSize=5,
-      nTree=14,
-      maxLeafNodes=nExample/2,
-      minLeafSize=nExample/10,
-   }
-
-   local trainer = dt.RandomForestTrainer(opt)
-
-   local decisionForest = trainer:train(trainSet, trainSet.featureIds)
-   mytester:assert(#decisionForest.trees == opt.nTree)
-   testAccuracy(decisionForest, "sparse train single-thread first", trainSet, 0.98)
-   testAccuracy(decisionForest, "sparse valid single-thread first", validSet, 0.95)
-
-   -- test stateless
-   local decisionForest = trainer:train(trainSet, trainSet.featureIds)
-
-   mytester:assert(#decisionForest.trees == opt.nTree)
-   testAccuracy(decisionForest, "sparse train single-thread second", trainSet, 0.98)
-   testAccuracy(decisionForest, "sparse valid single-thread second", validSet, 0.95)
-
-   -- test tree-parallel (create the trainer first, then enable parallelism on
-   -- it; the original recreated the trainer after calling treeParallel, which
-   -- silently tested a fresh single-threaded trainer instead)
-   local nThread = 2
-   local trainer = dt.RandomForestTrainer(opt)
-   trainer:treeParallel(nThread)
-
-   local decisionForest = trainer:train(trainSet, trainSet.featureIds)
-
-   mytester:assert(#decisionForest.trees == opt.nTree)
-   testAccuracy(decisionForest, "sparse train tree-parallel first", trainSet, 0.98)
-   testAccuracy(decisionForest, "sparse valid tree-parallel first", validSet, 0.95)
-end
-
-function dttest.WorkPool()
-   local nThread = 2
-   local wp = dt.WorkPool(nThread)
-
-   -- 1. some easy tests
-   local store = {key='nick',value=7}
-   wp:update('storeKeyValue', store)
-
-   wp:update('require', {libname='decisiontree', varname='dt'})
-
-   local bias = 2
-   local obj = nn.MSECriterion()
-   wp:update('require', {libname='decisiontree', varname='dt'})
-   wp:writeup('execute', function(store) return bias + obj:updateOutput(torch.Tensor{1},torch.Tensor{1}) + store.nick end)
-
-   local taskname, res = wp:read()
-   mytester:assert(taskname == 'execute')
-   mytester:assert(res == 9)
-
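   -- A minimal sketch (editorial addition, not part of the original test) of
   -- the WorkPool protocol exercised above: update() broadcasts state or
   -- commands to every worker, writeup() queues a task, read() collects one
   -- result:
   --
   --   local wp = dt.WorkPool(2)
   --   wp:update('require', {libname='decisiontree', varname='dt'})
   --   wp:writeup('execute', function(store) return 40 + 2 end)
   --   local taskname, res = wp:read() -- 'execute', 42
   --   wp:terminate()
-
-   -- 2. 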
trying to reproduce a difficult error - local trainSet, validSet = dt.getSparseDummyData() - - -- setup worker store (each worker will have its own copy) - local store = { - trainSet=trainSet, - minLeafSize=2 - } - wp:update('storeKeysValues', store) - - -- arguments/upvalues - local treeState = dt.GiniState(trainSet:getExampleIds()) - local shardId = 1 - local nShard = nThread - local featureIds = trainSet.featureIds - - local task = function(store, args) - assert(store.trainSet) - assert(store.minLeafSize) - - local bestSplit = args.treeState:findBestSplit(store.trainSet, args.featureIds, store.minLeafSize, args.shardId, args.nShard) - return bestSplit - end - local args = {treeState=treeState,featureIds=featureIds,shardId=shardId,nShard=nShard} - wp:writeup("execute", {func=task,args=args}) - - local taskname, bestSplit = wp:read() - mytester:assert(taskname == 'execute') - mytester:assert(torch.type(bestSplit) == 'table') - - -- closure - local task = function(store) - assert(store.trainSet) - assert(store.minLeafSize) - - local bestSplit = treeState:findBestSplit(store.trainSet, featureIds, store.minLeafSize, shardId, nShard) - return bestSplit - end - wp:writeup("execute", task) - - local taskname, bestSplit = wp:read() - mytester:assert(taskname == 'execute') - mytester:assert(torch.type(bestSplit) == 'table') - - local task = function(store, args) - assert(store.trainSet) - assert(torch.isTypeOf(treeState, 'dt.TreeState'), torch.type(treeState)) - - local bestSplit = treeState:findBestSplit(store.trainSet, featureIds, store.minLeafSize, shardId, nShard) - return bestSplit - end - local args = {featureIds=featureIds,shardId=shardId,nShard=nShard} - wp:writeup("execute", {func=task,args=args}) - - - local taskname, bestSplit = wp:read() - mytester:assert(taskname == 'execute') - mytester:assert(torch.type(bestSplit) == 'table') - - wp:terminate() -end - -function dttest.Sparse2Dense() - local batchsize = 4 - local minFeatureId, maxFeatureId = 10, 100 - local input = {{},{}} - for i=1,batchsize do - local inputsize = math.random(5,10) - input[1][i] = torch.LongTensor(inputsize):random(minFeatureId,maxFeatureId) - input[2][i] = torch.Tensor(inputsize):uniform(0,1) - end - local s2d = nn.Sparse2Dense(torch.LongTensor():range(minFeatureId,maxFeatureId)) - -- test 2d forward - local output = s2d:forward(input) - local output2 = torch.Tensor(batchsize, maxFeatureId-minFeatureId+1):zero() - local featureMap = {} - local j = 0 - for i=minFeatureId,maxFeatureId do - j = j + 1 - featureMap[i] = j - end - for i=1,batchsize do - local keys, values = input[1][i], input[2][i] - for j=1,keys:size(1) do - output2[{i,featureMap[keys[j] ]}] = values[j] - end - end - mytester:assertTensorEq(output, output2, 0.000001) - -- test 1d forward - local input = {input[1][batchsize], input[2][batchsize]} - local output = s2d:forward(input) - mytester:assertTensorEq(output, output2[batchsize], 0.000001) -end - -function dttest.Sparse2DenseDouble() - local batchsize = 4 - local minFeatureId, maxFeatureId = 10, 100 - local input = {{},{}} - for i=1,batchsize do - local inputsize = math.random(5,10) - input[1][i] = torch.LongTensor(inputsize):random(minFeatureId,maxFeatureId) - input[2][i] = torch.Tensor(inputsize):uniform(0,1):double() - end - local s2d = nn.Sparse2Dense(torch.LongTensor():range(minFeatureId,maxFeatureId)) - s2d:double() - -- test 2d forward - local output = s2d:forward(input) - local output2 = torch.Tensor(batchsize, maxFeatureId-minFeatureId+1):zero():double() - local featureMap = {} - local j = 
0 - for i=minFeatureId,maxFeatureId do - j = j + 1 - featureMap[i] = j - end - for i=1,batchsize do - local keys, values = input[1][i], input[2][i] - for j=1,keys:size(1) do - output2[{i,featureMap[keys[j] ]}] = values[j] - end - end - mytester:assertTensorEq(output, output2, 0.000001) - -- test 1d forward - local input = {input[1][batchsize], input[2][batchsize]} - local output = s2d:forward(input) - mytester:assertTensorEq(output, output2[batchsize], 0.000001) -end - -function dttest.LogitBoostCriterion() - local input = torch.randn(10) - local target = torch.LongTensor(10):random(0,1):type(torch.type(input)) - - local lb = nn.LogitBoostCriterion(false) - local loss = lb:updateOutput(input, target) - - local loss2 = 0 - for i=1,10 do - loss2 = loss2 + math.log(1 + math.exp(target[i] <= 0 and input[i] or -input[i])) - end - mytester:assert(math.abs(loss - loss2) < 0.00001) - - local gradInput = lb:updateGradInput(input, target) - local gradInput2 = gradInput:clone():zero() - for i=1,10 do - local p = dt.logistic(input[i]) - gradInput2[i] = (target[i] <= 0) and p or (p - 1) - end - mytester:assertTensorEq(gradInput, gradInput2, 0.000001) - - local hessInput = lb:updateHessInput(input, target) - local hessInput2 = hessInput:clone():zero() - for i=1,10 do - local p = dt.logistic(input[i]) - hessInput2[i] = p * (1.0 - p) - end - mytester:assertTensorEq(hessInput, hessInput2, 0.000001) -end - -function dttest.DFD() - local nExample = 100 - local batchsize = 4 - local inputsize = 10 - - -- train Random Forest - local trainSet, validSet, clusterExamples, inputs, targets = dt.getDenseDummyData(nExample, nil, inputsize) - local opt = { - activeRatio=0.5, - featureBaggingSize=5, - nTree=4, - maxLeafNodes=nExample/2, - minLeafSize=nExample/10, - } - local trainer = dt.RandomForestTrainer(opt) - local df = trainer:train(trainSet, trainSet.featureIds) - mytester:assert(#df.trees == opt.nTree) - - local dfd = nn.DFD(df) - dfd = nn.DFD(dfd:getReconstructionInfo()) - local dfd2 = nn.DFD(dfd:getReconstructionInfo(), true) - local input = validSet.input:sub(1,batchsize) - local output = dfd:forward(input) - local output2 = dfd2:forward(input) - - local _ = require 'moses' - - local function hasKey(keys,key) - local found = false - keys:apply(function(x) - if x == key then - found = true - end - end) - return found - end - - for i=1,batchsize do - local nodes = {} - local keys = output[1][i] - local keys2 = output2[1][i] - for j,tree in ipairs(df.trees) do - local stack = {} - tree:score(input[i], stack) - mytester:assert(hasKey(keys2, stack[#stack]._nodeId)) - - for k,node in ipairs(stack) do - if k > 1 then - assert(node._nodeId) - mytester:assert(hasKey(keys, node._nodeId), string.format("missing key=%d in %s", node._nodeId, tostring(keys))) - table.insert(nodes, node._nodeId) - end - end - end - mytester:assert(#nodes == keys:size(1)) - mytester:assert(#df.trees == keys2:size(1)) - end -end - -function dttest.DFDDouble() - local nExample = 100 - local batchsize = 4 - local inputsize = 10 - - -- train Random Forest - local trainSet, validSet, clusterExamples, inputs, targets = dt.getDenseDummyData(nExample, nil, inputsize) - local opt = { - activeRatio=0.5, - featureBaggingSize=5, - nTree=4, - maxLeafNodes=nExample/2, - minLeafSize=nExample/10, - } - local trainer = dt.RandomForestTrainer(opt) - local df = trainer:train(trainSet, trainSet.featureIds) - mytester:assert(#df.trees == opt.nTree) - - local dfd = nn.DFD(df) - dfd:double() - dfd = nn.DFD(dfd:getReconstructionInfo()) - local dfd2 = 
nn.DFD(dfd:getReconstructionInfo(), true) - local input = validSet.input:sub(1,batchsize):double() - local output = dfd:forward(input) - local output2 = dfd2:forward(input) - - local _ = require 'moses' - - local function hasKey(keys,key) - local found = false - keys:apply(function(x) - if x == key then - found = true - end - end) - return found - end - - for i=1,batchsize do - local nodes = {} - local keys = output[1][i] - local keys2 = output2[1][i] - for j,tree in ipairs(df.trees) do - local stack = {} - tree:score(input[i], stack) - mytester:assert(hasKey(keys2, stack[#stack]._nodeId)) - - for k,node in ipairs(stack) do - if k > 1 then - assert(node._nodeId) - mytester:assert(hasKey(keys, node._nodeId), string.format("missing key=%d in %s", node._nodeId, tostring(keys))) - table.insert(nodes, node._nodeId) - end - end - end - mytester:assert(#nodes == keys:size(1)) - mytester:assert(#df.trees == keys2:size(1)) - end -end - -function dttest.uniquecounts() -- DEPRECATED - local target = torch.LongTensor(100):random(1,3) - local input = torch.Tensor() - local inputset = {input=input, target=target} - - local counts = dt.uniquecounts(nil, inputset, 3) - - mytester:assert(counts:sum() == 100) - mytester:assert(counts:nElement() == 3) - - local res = torch.Tensor(3):zero() - target:apply(function(t) res[t] = res[t] + 1 end) - - mytester:assertTensorEq(counts, res) -end - -function dttest.entropy() -- DEPRECATED - -- 2 clusters with a bit overlap between classes: - local input = torch.Tensor(100,2) - input:narrow(1,1,50):normal(-1,.01) - input:narrow(1,51,50):normal(2,.01) - - local target = torch.LongTensor(100):fill(3) - target:narrow(1,1,45):fill(1) - target:narrow(1,56,45):fill(2) - - local inputset = {input=input, target=target} - - -- test entropy() - local fullent = dt.entropy(inputset) - - local halfset = {input=input:narrow(1,1,50), target=target:narrow(1,1,50)} - local halfent = dt.entropy(halfset) - - local perfectset = {input=input:narrow(1,56,45), target=target:narrow(1,56,45)} - local perfectent = dt.entropy(perfectset) - - mytester:assert(fullent > halfent) - mytester:assert(halfent > perfectent) - mytester:assert(perfectent < 0.0000001 and perfectent >= 0) -end - -function dt.test(tests) - math.randomseed(os.time()) - mytester = torch.Tester() - mytester:add(dttest) - mytester:run(tests) -end diff --git a/contrib/lua-torch/decisiontree/utils.h b/contrib/lua-torch/decisiontree/utils.h deleted file mode 100644 index 8a0196a589..0000000000 --- a/contrib/lua-torch/decisiontree/utils.h +++ /dev/null @@ -1,45 +0,0 @@ -#include "error.h" - -#define check_tensors(L, a, b) \ - do { \ - if ((a)->nDimension != (b)->nDimension) \ - return LUA_HANDLE_ERROR_STR((L), "different tensor dimensions"); \ - for (int __local__var = 0; __local__var < (a)->nDimension; __local__var++) \ - if ((a)->size[__local__var] != (b)->size[__local__var]) \ - return LUA_HANDLE_ERROR_STR((L), "different tensor sizes"); \ - } while (0) - -#define check_tensor(L, t, type) \ - do { \ - if (!type##_isContiguous(t)) \ - return LUA_HANDLE_ERROR_STR((L), "tensor should be contiguous"); \ - } while (0) - -#define get_tensor_size(t, type) \ - (TH##type##Tensor_nElement(t)) - -#define get_tensor(L, idx, type) \ - (TH##type##Tensor *)luaT_checkudata(L, idx, "torch." 
#type "Tensor") - -static int push_table_contents(lua_State *L, int arg) -{ - int size = 0; - while(1) { - lua_checkstack(L, 1); - lua_rawgeti(L, arg, size + 1); - if (lua_isnil(L, -1)) { - lua_pop(L, 1); - break; - } - size++; - } - return size; -} - -#define verify_push_table_contents(L, idx, count) do { \ - int __tmp_count = push_table_contents(L, idx); \ - if (__tmp_count != count) { \ - lua_pop(L, __tmp_count); \ - LUA_HANDLE_ERROR_STR(L, "Table sizes do not match"); \ - } \ - } while(0) diff --git a/contrib/lua-torch/decisiontree/utils.lua b/contrib/lua-torch/decisiontree/utils.lua deleted file mode 100644 index c32c3d08ba..0000000000 --- a/contrib/lua-torch/decisiontree/utils.lua +++ /dev/null @@ -1,125 +0,0 @@ -local dt = require "decisiontree._env" - --- returns a buffer table local to a thread (no serialized) -function dt.getBufferTable(name) - local dt = require 'decisiontree' - assert(torch.type(name) == 'string') - dt.buffer = dt.buffer or {} - dt.buffer[name] = dt.buffer[name] or {} - return dt.buffer[name] -end - -function dt.getSparseDummyData(nExample, nCluster, nFeature, overlap, nValid, nActive) - local dt = require 'decisiontree' - if torch.type(nExample) == 'table' then - local opt = nExample - nExample = opt.nExample - nCluster = opt.nCluster - nFeature = opt.nFeature - overlap = opt.overlap - nValid = opt.nValid - nActive = opt.nActive - end - nExample = nExample or 100 -- training set size - nCluster = nCluster or 10 - assert(nCluster >= 2) - nFeature = math.max(2, nFeature or 10) - overlap = overlap or 0 - nValid = nValid or nExample/10 -- validation set size - nActive = nActive or math.max(2, nFeature / 2) - - -- sample nCluster centers - local clusterCenter = torch.rand(nCluster, nFeature) - local clusterLabel = torch.LongTensor(nCluster) - local clusterExamples = {} - for i=1,nCluster do - clusterCenter[i]:add(i) - clusterLabel[i] = i % 2 - clusterExamples[i] = {} - end - - local sparseCenter = torch.Tensor() - - local shuffle = torch.LongTensor() - - -- build dataset in pseudo-dense format - local inputs = {} - local targets = torch.Tensor(nExample+nValid) - for i=1,nExample+nValid do - local clusterIdx = torch.random(1,nCluster) - table.insert(clusterExamples[clusterIdx], i) - - shuffle:randperm(nFeature) - local keys = torch.LongTensor(nActive):copy(shuffle:narrow(1,1,nActive)) - sparseCenter:index(clusterCenter[clusterIdx], 1, keys) - local stdiv = i <= nExample and 100 or 1000 - local values = torch.randn(nActive):div(stdiv):add(sparseCenter) - - table.insert(inputs, torch.SparseTensor(keys, values)) - - local label = clusterLabel[clusterIdx] - if math.random() < overlap then - targets[i] = label == 1 and 0 or 1 - else - targets[i] = label - end - end - - local _ = require 'moses' - local validSet = dt.DataSet(_.slice(inputs, nExample+1, nExample+nValid), targets:narrow(1,nExample+1,nValid)) - local trainSet = dt.DataSet(_.slice(inputs, 1, nExample), targets:narrow(1,1,nExample)) - - return trainSet, validSet, clusterExamples, inputs, targets -end - -function dt.getDenseDummyData(nExample, nCluster, nFeature, overlap, nValid) - local dt = require 'decisiontree' - if torch.type(nExample) == 'table' then - local opt = nExample - nExample = opt.nExample - nCluster = opt.nCluster - nFeature = opt.nFeature - overlap = opt.overlap - nValid = opt.nValid - end - nExample = nExample or 100 -- training set size - nCluster = nCluster or 10 - assert(nCluster >= 2) - nFeature = math.max(2, nFeature or 10) - overlap = overlap or 0 - nValid = nValid or nExample/10 -- 
validation set size - - -- sample nCluster centers - local clusterCenter = torch.rand(nCluster, nFeature) - local clusterLabel = torch.LongTensor(nCluster) - local clusterExamples = {} - for i=1,nCluster do - clusterCenter[i]:add(i) - clusterLabel[i] = i % 2 - clusterExamples[i] = {} - end - - -- build dataset in pseudo-dense format - local inputs = torch.Tensor(nExample+nValid, nFeature) - local targets = torch.Tensor(nExample+nValid) - for i=1,nExample+nValid do - local clusterIdx = torch.random(1,nCluster) - table.insert(clusterExamples[clusterIdx], i) - - local stdiv = i <= nExample and 100 or 1000 - inputs[i]:normal():div(stdiv):add(clusterCenter[clusterIdx]) - - local label = clusterLabel[clusterIdx] - if math.random() < overlap then - targets[i] = label == 1 and 0 or 1 - else - targets[i] = label - end - end - - local _ = require 'moses' - local validSet = dt.DataSet(inputs:narrow(1,nExample+1,nValid), targets:narrow(1,nExample+1,nValid)) - local trainSet = dt.DataSet(inputs:narrow(1,1,nExample), targets:narrow(1,1,nExample)) - - return trainSet, validSet, clusterExamples, inputs, targets -end diff --git a/contrib/lua-torch/nn/.gitignore b/contrib/lua-torch/nn/.gitignore deleted file mode 100644 index e0fa91edad..0000000000 --- a/contrib/lua-torch/nn/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -build/ -THNN_h.lua diff --git a/contrib/lua-torch/nn/.luacheckrc b/contrib/lua-torch/nn/.luacheckrc deleted file mode 100644 index 3d358e9c01..0000000000 --- a/contrib/lua-torch/nn/.luacheckrc +++ /dev/null @@ -1,13 +0,0 @@ --- -*- mode: lua; -*- -std = "luajit" - -globals = { - "torch", - "nn", - "include", -} - -unused_args = false - - -files['test.lua'].redefined = false diff --git a/contrib/lua-torch/nn/.travis.yml b/contrib/lua-torch/nn/.travis.yml deleted file mode 100644 index 1d10e0fb5f..0000000000 --- a/contrib/lua-torch/nn/.travis.yml +++ /dev/null @@ -1,56 +0,0 @@ -language: c -compiler: - - gcc - - clang -cache: - directories: - - $HOME/OpenBlasInstall -sudo: false -env: - - TORCH_LUA_VERSION=LUAJIT21 - - TORCH_LUA_VERSION=LUA51 - - TORCH_LUA_VERSION=LUA52 -addons: - apt: - packages: - - cmake - - gfortran - - gcc-multilib - - gfortran-multilib - - liblapack-dev - - build-essential - - gcc - - g++ - - curl - - cmake - - libreadline-dev - - git-core - - libqt4-core - - libqt4-gui - - libqt4-dev - - libjpeg-dev - - libpng-dev - - ncurses-dev - - imagemagick - - libzmq3-dev - - gfortran - - unzip - - gnuplot - - gnuplot-x11 -before_script: -- export ROOT_TRAVIS_DIR=$(pwd) -- export INSTALL_PREFIX=~/torch/install -- ls $HOME/OpenBlasInstall/lib || (cd /tmp/ && git clone https://github.com/xianyi/OpenBLAS.git -b master && cd OpenBLAS && (make NO_AFFINITY=1 -j$(getconf _NPROCESSORS_ONLN) 2>/dev/null >/dev/null) && make PREFIX=$HOME/OpenBlasInstall install) -- git clone https://github.com/torch/distro.git ~/torch --recursive -- cd ~/torch && git submodule update --init --recursive -- mkdir build && cd build -- export CMAKE_LIBRARY_PATH=$HOME/OpenBlasInstall/include:$HOME/OpenBlasInstall/lib:$CMAKE_LIBRARY_PATH -- cmake .. 
-DCMAKE_INSTALL_PREFIX="${INSTALL_PREFIX}" -DCMAKE_BUILD_TYPE=Release -DWITH_${TORCH_LUA_VERSION}=ON -- make && make install -- cd $ROOT_TRAVIS_DIR -- export LD_LIBRARY_PATH=${INSTALL_PREFIX}/lib:$LD_LIBRARY_PATH -script: -- ${INSTALL_PREFIX}/bin/luarocks make rocks/nn-scm-1.rockspec -- export PATH=${INSTALL_PREFIX}/bin:$PATH -- export TESTLUA=$(which luajit lua | head -n 1) -- ${TESTLUA} -lnn -e "t=nn.test(); if t.errors[1] then os.exit(1) end" diff --git a/contrib/lua-torch/nn/Abs.lua b/contrib/lua-torch/nn/Abs.lua deleted file mode 100644 index b32b64f799..0000000000 --- a/contrib/lua-torch/nn/Abs.lua +++ /dev/null @@ -1,22 +0,0 @@ -local Abs, parent = torch.class('nn.Abs', 'nn.Module') - -function Abs:__init() - parent.__init(self) -end - -function Abs:updateOutput(input) - input.THNN.Abs_updateOutput( - input:cdata(), - self.output:cdata() - ) - return self.output -end - -function Abs:updateGradInput(input, gradOutput) - input.THNN.Abs_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata() - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/AbsCriterion.lua b/contrib/lua-torch/nn/AbsCriterion.lua deleted file mode 100644 index 65e2f8ae18..0000000000 --- a/contrib/lua-torch/nn/AbsCriterion.lua +++ /dev/null @@ -1,32 +0,0 @@ -local AbsCriterion, parent = torch.class('nn.AbsCriterion', 'nn.Criterion') - -function AbsCriterion:__init(sizeAverage) - parent.__init(self) - if sizeAverage ~= nil then - self.sizeAverage = sizeAverage - else - self.sizeAverage = true - end -end - -function AbsCriterion:updateOutput(input, target) - self.output_tensor = self.output_tensor or input.new(1) - input.THNN.AbsCriterion_updateOutput( - input:cdata(), - target:cdata(), - self.output_tensor:cdata(), - self.sizeAverage - ) - self.output = self.output_tensor[1] - return self.output -end - -function AbsCriterion:updateGradInput(input, target) - input.THNN.AbsCriterion_updateGradInput( - input:cdata(), - target:cdata(), - self.gradInput:cdata(), - self.sizeAverage - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/Add.lua b/contrib/lua-torch/nn/Add.lua deleted file mode 100644 index d071a15b3a..0000000000 --- a/contrib/lua-torch/nn/Add.lua +++ /dev/null @@ -1,66 +0,0 @@ -local Add, parent = torch.class('nn.Add', 'nn.Module') - -function Add:__init(inputSize,scalar) - parent.__init(self) - - local size = inputSize - if scalar then size=1 end - self.scalar = scalar - self.bias = torch.Tensor(size) - self.gradBias = torch.Tensor(size) - - self._ones = torch.Tensor{1} - - self:reset() -end - -function Add:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1./math.sqrt(self.bias:size(1)) - end - - self.bias:uniform(-stdv, stdv) -end - -function Add:updateOutput(input) - self.output:resizeAs(input):copy(input) - if self.scalar then - self.output:add(self.bias[1]); - else - if input:isSameSizeAs(self.bias) then - self.output:add(self.bias) - else - local batchSize = input:size(1) - if self._ones:size(1) ~= batchSize then - self._ones:resize(batchSize):fill(1) - end - local bias = self.bias:view(-1) - local output = self.output:view(batchSize, -1) - output:addr(1, self._ones, bias) - end - end - return self.output -end - -function Add:updateGradInput(input, gradOutput) - if self.gradInput then - self.gradInput:resizeAs(gradOutput):copy(gradOutput) - return self.gradInput - end -end - -function Add:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - if self.gradBias:size(1) == 1 then - self.gradBias[1] = self.gradBias[1] + 
scale*gradOutput:sum(); - else - if input:isSameSizeAs(self.bias) then - self.gradBias:add(scale, gradOutput) - else - local gradOutput = gradOutput:view(input:size(1), -1) - self.gradBias:view(-1):addmv(scale, gradOutput:t(), self._ones) - end - end -end diff --git a/contrib/lua-torch/nn/AddConstant.lua b/contrib/lua-torch/nn/AddConstant.lua deleted file mode 100644 index b686d719ca..0000000000 --- a/contrib/lua-torch/nn/AddConstant.lua +++ /dev/null @@ -1,50 +0,0 @@ -local AddConstant, parent = torch.class('nn.AddConstant', 'nn.Module') - -function AddConstant:__init(constant_scalar,ip) - parent.__init(self) - self.constant_scalar = constant_scalar - - -- default for inplace is false - self.inplace = ip or false - if (ip and type(ip) ~= 'boolean') then - error('in-place flag must be boolean') - end -end - -function AddConstant:updateOutput(input) - assert(type(self.constant_scalar) == 'number' or - (torch.isTensor(self.constant_scalar) and input:nDimension() <= 2 and - input:size(input:nDimension()) == self.constant_scalar:size(1)), - 'input is not scalar or doesn\'t match with the dimension of constant!') - local tmp - if torch.isTensor(self.constant_scalar) and input:nDimension() == 2 then - local nOutput = self.constant_scalar:size(1) - tmp = self.constant_scalar.new() - tmp:resize(1,nOutput) - tmp:copy(self.constant_scalar) - tmp = tmp:expand(input:size(1),nOutput) - else - tmp = self.constant_scalar - end - if self.inplace then - input:add(tmp) - self.output:set(input) - else - self.output:resizeAs(input) - self.output:copy(input) - self.output:add(tmp) - end - return self.output -end - -function AddConstant:updateGradInput(input, gradOutput) - if self.inplace then - self.gradInput:set(gradOutput) - -- restore previous input value - input:add(-self.constant_scalar) - else - self.gradInput:resizeAs(gradOutput) - self.gradInput:copy(gradOutput) - end - return self.gradInput -end diff --git a/contrib/lua-torch/nn/BCECriterion.lua b/contrib/lua-torch/nn/BCECriterion.lua deleted file mode 100644 index 8bb5f81787..0000000000 --- a/contrib/lua-torch/nn/BCECriterion.lua +++ /dev/null @@ -1,64 +0,0 @@ -local THNN = require 'nn.THNN' -local BCECriterion, parent = torch.class('nn.BCECriterion', 'nn.Criterion') - -function BCECriterion:__init(weights, sizeAverage) - parent.__init(self) - if sizeAverage ~= nil then - self.sizeAverage = sizeAverage - else - self.sizeAverage = true - end - if weights ~= nil then - assert(weights:dim() == 1, "weights input should be 1-D Tensor") - self.weights = weights - end -end - - -function BCECriterion:__len() - return self.weights and #self.weights or 0 -end - -function BCECriterion:updateOutput(input, target) - -- - log(input) * target - log(1 - input) * (1 - target) - assert( input:nElement() == target:nElement(), - "input and target size mismatch") - self.output_tensor = self.output_tensor or input.new(1) - - local weights = self.weights - if weights ~= nil and target:dim() ~= 1 then - weights = self.weights:view(1, target:size(2)):expandAs(target) - end - - input.THNN.BCECriterion_updateOutput( - input:cdata(), - target:cdata(), - self.output_tensor:cdata(), - self.sizeAverage, - THNN.optionalTensor(weights) - ) - - self.output = self.output_tensor[1] - return self.output -end - -function BCECriterion:updateGradInput(input, target) - -- - (target - input) / ( input (1 - input) ) - assert( input:nElement() == target:nElement(), - "input and target size mismatch") - - local weights = self.weights - if weights ~= nil and target:dim() ~= 1 then - weights = 
self.weights:view(1, target:size(2)):expandAs(target)
-   end
-
-   input.THNN.BCECriterion_updateGradInput(
-      input:cdata(),
-      target:cdata(),
-      self.gradInput:cdata(),
-      self.sizeAverage,
-      THNN.optionalTensor(weights)
-   )
-
-   return self.gradInput
-end
diff --git a/contrib/lua-torch/nn/BatchNormalization.lua b/contrib/lua-torch/nn/BatchNormalization.lua
deleted file mode 100644
index 8dfc576b3b..0000000000
--- a/contrib/lua-torch/nn/BatchNormalization.lua
+++ /dev/null
@@ -1,213 +0,0 @@
---[[
-   This file implements Batch Normalization as described in the paper:
-   "Batch Normalization: Accelerating Deep Network Training
-   by Reducing Internal Covariate Shift"
-   by Sergey Ioffe, Christian Szegedy
-
-   This implementation is useful for inputs NOT coming from convolution layers.
-   For convolution layers, use nn.SpatialBatchNormalization.
-
-   The operation implemented is:
-   y =     ( x - mean(x) )
-        -------------------- * gamma + beta
-        standard-deviation(x)
-   where gamma and beta are learnable parameters.
-
-   The learning of gamma and beta is optional.
-
-   Usage:
-   with    learnable parameters: nn.BatchNormalization(N [,eps] [,momentum])
-                                 where N = dimensionality of input
-   without learnable parameters: nn.BatchNormalization(N [,eps] [,momentum], false)
-
-   eps is a small value added to the standard deviation to avoid divide-by-zero.
-       Defaults to 1e-5.
-
-   At training time, this layer keeps a running estimate of its computed mean and std.
-   The running estimate is kept with a default momentum of 0.1 (unless overridden).
-   At test time, this running mean/std is used to normalize.
-]]--
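-- A short usage sketch (editorial addition; sizes are illustrative only):
--
--   local bn = nn.BatchNormalization(10)          -- N = 10 input features
--   local y = bn:forward(torch.randn(16, 10))     -- training mode: batch statistics
--   bn:evaluate()
--   local yTest = bn:forward(torch.randn(16, 10)) -- test mode: running mean/var
-local BN,parent = torch.class('nn.BatchNormalization', 'nn.Module')
-local THNN = require 'nn.THNN'
-
-BN.__version = 2
-
--- expected dimension of input
-BN.nDim = 2
-
-function BN:__init(nOutput, eps, momentum, affine)
-   parent.__init(self)
-   assert(nOutput and type(nOutput) == 'number',
-          'Missing argument #1: dimensionality of input. ')
-   assert(nOutput ~= 0, 'To set affine=false call BatchNormalization'
-      .. 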
'(nOutput, eps, momentum, false) ') - if affine ~= nil then - assert(type(affine) == 'boolean', 'affine has to be true/false') - self.affine = affine - else - self.affine = true - end - self.eps = eps or 1e-5 - self.train = true - self.momentum = momentum or 0.1 - self.running_mean = torch.zeros(nOutput) - self.running_var = torch.ones(nOutput) - - if self.affine then - self.weight = torch.Tensor(nOutput) - self.bias = torch.Tensor(nOutput) - self.gradWeight = torch.Tensor(nOutput) - self.gradBias = torch.Tensor(nOutput) - self:reset() - end -end - -function BN:reset() - if self.weight then - self.weight:uniform() - end - if self.bias then - self.bias:zero() - end - self.running_mean:zero() - self.running_var:fill(1) -end - -function BN:checkInputDim(input) - local iDim = input:dim() - assert(iDim == self.nDim or - (iDim == self.nDim - 1 and self.train == false), string.format( - 'only mini-batch supported (%dD tensor), got %dD tensor instead', - self.nDim, iDim)) - local featDim = (iDim == self.nDim - 1) and 1 or 2 - assert(input:size(featDim) == self.running_mean:nElement(), string.format( - 'got %d-feature tensor, expected %d', - input:size(featDim), self.running_mean:nElement())) -end - -local function makeContiguous(self, input, gradOutput) - if not input:isContiguous() then - self._input = self._input or input.new() - self._input:resizeAs(input):copy(input) - input = self._input - end - if gradOutput then - if not gradOutput:isContiguous() then - self._gradOutput = self._gradOutput or gradOutput.new() - self._gradOutput:resizeAs(gradOutput):copy(gradOutput) - gradOutput = self._gradOutput - end - end - return input, gradOutput -end - -local function makeBatch(self, input) - local iDim = input:dim() - if self.train == false and iDim == self.nDim - 1 then - return nn.utils.addSingletonDimension(input, input, 1) - else - return input - end -end - -function BN:updateOutput(input) - self:checkInputDim(input) - - input = makeContiguous(self, input) - input = makeBatch(self, input) - - self.save_mean = self.save_mean or input.new() - self.save_mean:resizeAs(self.running_mean) - self.save_std = self.save_std or input.new() - self.save_std:resizeAs(self.running_var) - - input.THNN.BatchNormalization_updateOutput( - input:cdata(), - self.output:cdata(), - THNN.optionalTensor(self.weight), - THNN.optionalTensor(self.bias), - self.running_mean:cdata(), - self.running_var:cdata(), - self.save_mean:cdata(), - self.save_std:cdata(), - self.train, - self.momentum, - self.eps) - - return self.output -end - -local function backward(self, input, gradOutput, scale, gradInput, gradWeight, gradBias) - self:checkInputDim(input) - self:checkInputDim(gradOutput) - assert(self.save_mean and self.save_std, 'must call :updateOutput() first') - - input, gradOutput = makeContiguous(self, input, gradOutput) - input = makeBatch(self, input) - gradOutput = makeBatch(self, gradOutput) - - scale = scale or 1 - if gradInput then - gradInput:resizeAs(gradOutput) - end - - input.THNN.BatchNormalization_backward( - input:cdata(), - gradOutput:cdata(), - THNN.optionalTensor(gradInput), - THNN.optionalTensor(gradWeight), - THNN.optionalTensor(gradBias), - THNN.optionalTensor(self.weight), - self.running_mean:cdata(), - self.running_var:cdata(), - self.save_mean:cdata(), - self.save_std:cdata(), - self.train, - scale, - self.eps) - - return self.gradInput -end - -function BN:backward(input, gradOutput, scale) - return backward(self, input, gradOutput, scale, self.gradInput, self.gradWeight, self.gradBias) -end - -function 
BN:updateGradInput(input, gradOutput)
-   return backward(self, input, gradOutput, 1, self.gradInput)
-end
-
-function BN:accGradParameters(input, gradOutput, scale)
-   return backward(self, input, gradOutput, scale, nil, self.gradWeight, self.gradBias)
-end
-
-function BN:read(file, version)
-   parent.read(self, file)
-   if version < 2 then
-      if self.running_std then
-         self.running_var = self.running_std:pow(-2):add(-self.eps)
-         self.running_std = nil
-      end
-   end
-end
-
-function BN:clearState()
-   -- first 5 buffers are not present in the current implementation,
-   -- but we keep them for cleaning old saved models
-   nn.utils.clear(self, {
-      'buffer',
-      'buffer2',
-      'centered',
-      'std',
-      'normalized',
-      '_input',
-      '_gradOutput',
-      'save_mean',
-      'save_std',
-   })
-   return parent.clearState(self)
-end
-
-function BN:__tostring__()
-   return string.format('%s (%dD) (%d)', torch.type(self), self.nDim, self.running_mean:nElement())
-end
diff --git a/contrib/lua-torch/nn/Bilinear.lua b/contrib/lua-torch/nn/Bilinear.lua
deleted file mode 100644
index 9350b03ec3..0000000000
--- a/contrib/lua-torch/nn/Bilinear.lua
+++ /dev/null
@@ -1,163 +0,0 @@
-local Bilinear, parent = torch.class('nn.Bilinear', 'nn.Module')
-
-local function isint(x) return type(x) == 'number' and x == math.floor(x) end
-function Bilinear:__assertInput(input)
-   assert(input and type(input) == 'table' and #input == 2,
-      'input should be a table containing two data Tensors')
-   assert(input[1]:nDimension() == 2 and input[2]:nDimension() == 2,
-      'input Tensors should be two-dimensional')
-   assert(input[1]:size(1) == input[2]:size(1),
-      'input Tensors should have the same number of rows (instances)')
-   assert(input[1]:size(2) == self.weight:size(2),
-      'dimensionality of first input is erroneous')
-   assert(input[2]:size(2) == self.weight:size(3),
-      'dimensionality of second input is erroneous')
-end
-function Bilinear:__assertInputGradOutput(input, gradOutput)
-   assert(input[1]:size(1) == gradOutput:size(1),
-      'number of rows in gradOutput does not match input')
-   assert(gradOutput:size(2) == self.weight:size(1),
-      'number of columns in gradOutput does not match output size of layer')
-end
-
-function Bilinear:__init(inputSize1, inputSize2, outputSize, bias)
-
-   -- assertions:
-   assert(self and inputSize1 and inputSize2 and outputSize,
-      'should specify inputSize1 and inputSize2 and outputSize')
-   assert(isint(inputSize1) and isint(inputSize2) and isint(outputSize),
-      'inputSize1 and inputSize2 and outputSize should be integer numbers')
-   assert(inputSize1 > 0 and inputSize2 > 0 and outputSize > 0,
-      'inputSize1 and inputSize2 and outputSize should be positive numbers')
-
-   -- set up model:
-   parent.__init(self)
-   local bias = ((bias == nil) and true) or bias
-   self.weight = torch.Tensor(outputSize, inputSize1, inputSize2)
-   self.gradWeight = torch.Tensor(outputSize, inputSize1, inputSize2)
-   if bias then
-      self.bias = torch.Tensor(outputSize)
-      self.gradBias = torch.Tensor(outputSize)
-   end
-   self.gradInput = {torch.Tensor(), torch.Tensor()}
-   self:reset()
-end
-
-function Bilinear:reset(stdv)
-   assert(self)
-   if stdv then
-      assert(stdv and type(stdv) == 'number' and stdv > 0,
-         'standard deviation should be a positive number')
-      stdv = stdv * math.sqrt(3)
-   else
-      stdv = 1 / math.sqrt(self.weight:size(2))
-   end
-   self.weight:uniform(-stdv, stdv)
-   if self.bias then self.bias:uniform(-stdv, stdv) end
-   return self
-end
-
-function Bilinear:updateOutput(input)
-   assert(self)
-   self:__assertInput(input)
-
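   -- Editorial annotation (not in the original source): for each output unit k,
   -- the per-slice loop below computes
   --   output[n][k] = sum_ij input[1][n][i] * weight[k][i][j] * input[2][n][j]
   -- plus bias[k] when a bias is present.
-   -- set up buffer:
-   self.buff2 = 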
self.buff2 or input[1].new() - self.buff2:resizeAs(input[2]) - - -- compute output scores: - self.output:resize(input[1]:size(1), self.weight:size(1)) - for k = 1,self.weight:size(1) do - torch.mm(self.buff2, input[1], self.weight[k]) - self.buff2:cmul(input[2]) - torch.sum(self.output:narrow(2, k, 1), self.buff2, 2) - end - if self.bias then - self.output:add( - self.bias:reshape(1, self.bias:nElement()):expandAs(self.output) - ) - end - return self.output -end - -function Bilinear:updateGradInput(input, gradOutput) - assert(self) - if self.gradInput then - self:__assertInputGradOutput(input, gradOutput) - - if #self.gradInput == 0 then - for i = 1, 2 do self.gradInput[i] = input[1].new() end - end - - -- compute d output / d input: - self.gradInput[1]:resizeAs(input[1]):fill(0) - self.gradInput[2]:resizeAs(input[2]):fill(0) - - - -- do first slice of weight tensor (k = 1) - self.gradInput[1]:mm(input[2], self.weight[1]:t()) - self.gradInput[1]:cmul(gradOutput:narrow(2,1,1):expand(self.gradInput[1]:size(1), - self.gradInput[1]:size(2))) - self.gradInput[2]:addmm(1, input[1], self.weight[1]) - self.gradInput[2]:cmul(gradOutput:narrow(2,1,1):expand(self.gradInput[2]:size(1), - self.gradInput[2]:size(2))) - - -- do remaining slices of weight tensor - if self.weight:size(1) > 1 then - self.buff1 = self.buff1 or input[1].new() - self.buff1:resizeAs(input[1]) - - for k = 2, self.weight:size(1) do - self.buff1:mm(input[2], self.weight[k]:t()) - self.buff1:cmul(gradOutput:narrow(2,k,1):expand(self.gradInput[1]:size(1), - self.gradInput[1]:size(2))) - self.gradInput[1]:add(self.buff1) - - self.buff2:mm(input[1], self.weight[k]) - self.buff2:cmul(gradOutput:narrow(2,k,1):expand(self.gradInput[2]:size(1), - self.gradInput[2]:size(2))) - self.gradInput[2]:add(self.buff2) - end - end - return self.gradInput - end -end - -function Bilinear:accGradParameters(input, gradOutput, scale) - local scale = scale or 1 - self:__assertInputGradOutput(input, gradOutput) - assert(scale and type(scale) == 'number' and scale >= 0) - - -- make sure we have buffer: - self.buff1 = self.buff1 or input[1].new() - self.buff1:resizeAs(input[1]) - - -- accumulate parameter gradients: - for k = 1,self.weight:size(1) do - torch.cmul( - self.buff1, input[1], gradOutput:narrow(2, k, 1):expandAs(input[1]) - ) - self.gradWeight[k]:addmm(self.buff1:t(), input[2]) - end - if self.bias then self.gradBias:add(scale, gradOutput:sum(1)) end -end - -function Bilinear:sharedAccUpdateGradParameters(input, gradOutput, lr) - -- we do not need to accumulate parameters when sharing: - self:defaultAccUpdateGradParameters(input, gradOutput, lr) -end - -function Bilinear:__tostring__() - return torch.type(self) .. 
- string.format( - '(%dx%d -> %d) %s', - self.weight:size(2), self.weight:size(3), self.weight:size(1), - (self.bias == nil and ' without bias' or '') - ) -end - -function Bilinear:clearState() - if self.buff2 then self.buff2:set() end - if self.buff1 then self.buff1:set() end - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/Bottle.lua b/contrib/lua-torch/nn/Bottle.lua deleted file mode 100644 index 6dee432f5c..0000000000 --- a/contrib/lua-torch/nn/Bottle.lua +++ /dev/null @@ -1,71 +0,0 @@ -local Bottle, parent = torch.class("nn.Bottle", "nn.Decorator") -local unpack = unpack or table.unpack - -function Bottle:__init(module, nInputDim, nOutputDim) - parent.__init(self, module) - self.nInputDim = nInputDim or 2 - self.nOutputDim = nOutputDim or self.nInputDim - self.dimDelta = self.nInputDim - self.nOutputDim - -- Used to reshape the gradients - self.inShape = torch.Tensor(self.nInputDim) - self.outShape = torch.Tensor(self.nOutputDim) -end - -function Bottle:updateOutput(input) - -- first batchDims dimensions will be fused - local batchDims = input:dim() - self.nInputDim + 1 - -- see if bottle is required - if batchDims > 1 then - -- bottle the first dims - local inSize = torch.LongTensor(input:size()) - local squeezeSize = inSize[{{1, batchDims - 1}}]:prod() - self.inShape:copy(inSize[{{batchDims, input:dim()}}]) - self.inShape[{{1}}]:mul(squeezeSize) - -- Forward with the module's dimension - local newInput = input:view(unpack(self.inShape:totable())) - local output = self.modules[1]:updateOutput(newInput) - assert(output:dim() == self.nOutputDim, - "Wrong number of output dims on module. Expected: " .. - self.nOutputDim .. ' but got ' .. - tostring(output and output:dim())) - self.outShape:copy(torch.LongTensor(output:size())) - if math.abs(self.dimDelta) > 0 then - inSize:resize(inSize:size(1) - self.dimDelta) - end - inSize[{{batchDims, inSize:size(1)}}]:copy(self.outShape) - inSize[{{batchDims}}]:div(squeezeSize) - -- unbottle - self.output:set(output:view(unpack(torch.totable(inSize)))) - else - self.output:set(self.modules[1]:updateOutput(input)) - end - return self.output -end - -function Bottle:updateGradInput(input, gradOutput) - if input:dim() > self.nInputDim then - local input_ = input:view(unpack(self.inShape:totable())) - local gradOutput_ = gradOutput:view(unpack(self.outShape:totable())) - self.modules[1]:updateGradInput(input_, gradOutput_) - if self.modules[1].gradInput then - self.gradInput:set(self.modules[1].gradInput:viewAs(input)) - else - self.gradInput = nil - end - else - if self.modules[1].gradInput then - self.gradInput:set(self.modules[1]:updateGradInput(input, gradOutput)) - else - self.gradInput = nil - end - end - return self.gradInput -end - -function Bottle:accGradParameters(input, gradOutput, scale) - if input:dim() > self.nInputDim then - input = input:view(unpack(self.inShape:totable())) - gradOutput = gradOutput:view(unpack(self.outShape:totable())) - end - self.modules[1]:accGradParameters(input, gradOutput, scale) -end diff --git a/contrib/lua-torch/nn/CAdd.lua b/contrib/lua-torch/nn/CAdd.lua deleted file mode 100644 index 1d7b45726c..0000000000 --- a/contrib/lua-torch/nn/CAdd.lua +++ /dev/null @@ -1,127 +0,0 @@ -local CAdd, parent = torch.class("nn.CAdd", "nn.Module") - -function CAdd:__init(...) 
- parent.__init(self) - - local arg = {...} - - self.size = torch.LongStorage() - local n = #arg - if n == 1 and torch.type(arg[1]) == 'torch.LongStorage' then - self.size:resize(#arg[1]):copy(arg[1]) - else - self.size:resize(n) - for i=1,n do - self.size[i] = arg[i] - end - end - - self.bias = torch.Tensor(self.size) - self.gradBias = torch.Tensor(self.size) - - self.output:resize(self.size) - - self:reset() -end - -function CAdd:reset(stdv) - if stdv then - --std of uniform distribution on interval [-a,a] = a/sqrt(3) - stdv = stdv * math.sqrt(3) - else - stdv = 1.0/math.sqrt(self.bias:nElement()) - end - self.bias:uniform(-stdv,stdv) -end - -function CAdd:updateOutput(input) - self._output = self._output or input.new() - self._bias = self._bias or input.new() - self._expand = self._expand or input.new() - self._repeat = self._repeat or input.new() - - self.output:resizeAs(input):copy(input) - if input:nElement() == self.bias:nElement() then - self.output:add(self.bias) - else - if self.bias:dim() == input:dim() then - self._output:set(self.output) - self._bias:set(self.bias) - else - local batchSize = input:size(1) - self._output:view(self.output, batchSize, -1) - self._bias:view(self.bias, 1, -1) - end - - self._expand:expandAs(self._bias, self._output) - - --expandAs uses stride 0 and self._expand is not contiguous - --cuda ops may assume contiguous input - if torch.type(input) == 'torch.CudaTensor' then - self._repeat:resizeAs(self._expand):copy(self._expand) - self._output:add(self._repeat) - else - self._output:add(self._expand) - end - end - - return self.output -end - -function CAdd:updateGradInput(input, gradOutput) - self.gradInput = self.gradInput or input.new() - self.gradInput:resizeAs(gradOutput):copy(gradOutput) - - return self.gradInput -end - -function CAdd:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - - self._gradBias = self._gradBias or gradOutput.new() - self._gradOutput = self._gradOutput or gradOutput.new() - self._repeat = self._repeat or gradOutput.new() - - if self.bias:nElement() == gradOutput:nElement() then - self.gradBias:add(scale, gradOutput) - else - if self.bias:dim() == gradOutput:dim() then - self._gradBias:set(self.gradBias) - self._gradOutput:set(gradOutput) - else - local batchSize = input:size(1) - self._gradBias:view(self.gradBias, 1, -1) - self._gradOutput:view(gradOutput, batchSize, -1) - end - - self._gradBias:expandAs(self._gradBias, self._gradOutput) - - --expandAs uses stride 0 and self._gradBias is not contiguous - --cuda ops may assume contiguous input - if torch.type(self._gradBias) == 'torch.CudaTensor' then - self._repeat:resizeAs(self._gradBias):copy(self._gradBias) - self._repeat:add(scale, self._gradOutput) - self._gradBias:copy(self._repeat) - else - self._gradBias:add(scale, self._gradOutput) - end - end -end - -function CAdd:type(type, tensorCache) - if type then - self:clearState() - end - return parent.type(self, type, tensorCache) -end - -function CAdd:clearState() - nn.utils.clear(self, { - '_gradBias', - '_expand', - '_output', - '_bias', - '_repeat' - }) - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/CAddTable.lua b/contrib/lua-torch/nn/CAddTable.lua deleted file mode 100644 index 79deb7e9b4..0000000000 --- a/contrib/lua-torch/nn/CAddTable.lua +++ /dev/null @@ -1,36 +0,0 @@ -local CAddTable, parent = torch.class('nn.CAddTable', 'nn.Module') - -function CAddTable:__init(ip) - parent.__init(self) - self.inplace = ip - self.gradInput = {} -end - -function 
CAddTable:updateOutput(input) - if self.inplace then - self.output:set(input[1]) - else - self.output:resizeAs(input[1]):copy(input[1]) - end - for i=2,#input do - self.output:add(input[i]) - end - return self.output -end - -function CAddTable:updateGradInput(input, gradOutput) - for i=1,#input do - self.gradInput[i] = self.gradInput[i] or input[1].new() - if self.inplace then - self.gradInput[i]:set(gradOutput) - else - self.gradInput[i]:resizeAs(input[i]):copy(gradOutput) - end - end - - for i=#input+1, #self.gradInput do - self.gradInput[i] = nil - end - - return self.gradInput -end diff --git a/contrib/lua-torch/nn/CAddTensorTable.lua b/contrib/lua-torch/nn/CAddTensorTable.lua deleted file mode 100644 index 16efe44502..0000000000 --- a/contrib/lua-torch/nn/CAddTensorTable.lua +++ /dev/null @@ -1,43 +0,0 @@ - -local CAddTensorTable, parent = torch.class('nn.CAddTensorTable', 'nn.Module') - -function CAddTensorTable:__init() - parent.__init(self) - self.gradInput = {} -end - --- input is a table with 2 entries. input[1] is the vector to be added. --- input[2] is the table to which we add the vector -function CAddTensorTable:updateOutput(input) - local currentOutput = {} - for i=1,#input[2] do - currentOutput[i] = currentOutput[i] or input[1].new() - currentOutput[i]:resizeAs(input[1]) - currentOutput[i]:copy(input[2][i]) - currentOutput[i]:add(input[1]) - end - for i = #input[2]+1, #currentOutput do - currentOutput[i] = nil - end - self.output = currentOutput - return self.output -end - -function CAddTensorTable:updateGradInput(input, gradOutput) - self.gradInput[1] = self.gradInput[1] or input[1].new() - self.gradInput[1]:resizeAs(input[1]) - self.gradInput[1]:copy(gradOutput[1]) - for i=2, #input[2] do - self.gradInput[1]:add(gradOutput[i]) - end - self.gradInput[2] = self.gradInput[2] or {} - for i=1,#input[2] do - self.gradInput[2][i] = self.gradInput[2][i] or input[1].new() - self.gradInput[2][i]:resizeAs(input[1]) - self.gradInput[2][i]:copy(gradOutput[i]) - end - for i=#input[2]+1, #self.gradInput[2] do - self.gradInput[2][i] = nil - end - return self.gradInput -end \ No newline at end of file diff --git a/contrib/lua-torch/nn/CDivTable.lua b/contrib/lua-torch/nn/CDivTable.lua deleted file mode 100644 index bf044c9afb..0000000000 --- a/contrib/lua-torch/nn/CDivTable.lua +++ /dev/null @@ -1,26 +0,0 @@ - -local CDivTable, parent = torch.class('nn.CDivTable', 'nn.Module') - -function CDivTable:__init() - parent.__init(self) - self.gradInput = {} -end - -function CDivTable:updateOutput(input) - self.output:resizeAs(input[1]):copy(input[1]) - self.output:cdiv(input[2]) - return self.output -end - -function CDivTable:updateGradInput(input, gradOutput) - self.gradInput[1] = self.gradInput[1] or input[1].new() - self.gradInput[2] = self.gradInput[2] or input[1].new() - self.gradInput[1]:resizeAs(input[1]):copy(gradOutput):cdiv(input[2]) - self.gradInput[2]:resizeAs(input[2]):zero():addcdiv(-1,self.gradInput[1],input[2]):cmul(input[1]) - - for i=#input+1, #self.gradInput do - self.gradInput[i] = nil - end - - return self.gradInput -end diff --git a/contrib/lua-torch/nn/CMakeLists.txt b/contrib/lua-torch/nn/CMakeLists.txt deleted file mode 100644 index cebddfbfc4..0000000000 --- a/contrib/lua-torch/nn/CMakeLists.txt +++ /dev/null @@ -1,14 +0,0 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.6 FATAL_ERROR) -CMAKE_POLICY(VERSION 2.6) -INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/../torch7/lib/TH) -INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}/../torch7/lib/TH) -ADD_SUBDIRECTORY(lib) - -FILE(STRINGS 
lib/THNN/generic/THNN.h THNN_headers NEWLINE_CONSUME) -FILE(WRITE THNN_h.lua "return [[") -FILE(APPEND THNN_h.lua ${THNN_headers}) -FILE(APPEND THNN_h.lua "]]") - -FILE(GLOB luasrc *.lua) - -ADD_TORCH_PACKAGE(nn "" "${luasrc}") diff --git a/contrib/lua-torch/nn/CMaxTable.lua b/contrib/lua-torch/nn/CMaxTable.lua deleted file mode 100644 index 845e38d23d..0000000000 --- a/contrib/lua-torch/nn/CMaxTable.lua +++ /dev/null @@ -1,46 +0,0 @@ -local CMaxTable, parent = torch.class('nn.CMaxTable', 'nn.Module') - -function CMaxTable:__init() - parent.__init(self) - self.gradInput = {} - self.maxIdx = torch.Tensor() - self.mask = torch.Tensor() - self.maxVals = torch.Tensor() - self.gradMaxVals = torch.Tensor() -end - -function CMaxTable:updateOutput(input) - self.output:resizeAs(input[1]):copy(input[1]) - self.maxIdx:resizeAs(input[1]):fill(1) - for i=2,#input do - self.maskByteTensor = self.maskByteTensor or - (torch.type(self.output) == 'torch.CudaTensor' and - torch.CudaByteTensor() or torch.ByteTensor()) - self.mask:gt(input[i], self.output) - self.maskByteTensor:resize(self.mask:size()):copy(self.mask) - self.maxIdx:maskedFill(self.maskByteTensor, i) - self.maxVals:maskedSelect(input[i], self.maskByteTensor) - self.output:maskedCopy(self.maskByteTensor, self.maxVals) - end - return self.output -end - -function CMaxTable:updateGradInput(input, gradOutput) - for i=1,#input do - self.gradInput[i] = self.gradInput[i] or input[i].new() - self.gradInput[i]:resizeAs(input[i]):fill(0.0) - self.maskByteTensor = self.maskByteTensor or - (torch.type(self.output) == 'torch.CudaTensor' and - torch.CudaByteTensor() or torch.ByteTensor()) - self.mask:eq(self.maxIdx, i) - self.maskByteTensor:resize(self.mask:size()):copy(self.mask) - self.gradMaxVals:maskedSelect(gradOutput, self.maskByteTensor) - self.gradInput[i]:maskedCopy(self.maskByteTensor, self.gradMaxVals) - end - - for i=#input+1, #self.gradInput do - self.gradInput[i] = nil - end - - return self.gradInput -end diff --git a/contrib/lua-torch/nn/CMinTable.lua b/contrib/lua-torch/nn/CMinTable.lua deleted file mode 100644 index 25b9a19a2d..0000000000 --- a/contrib/lua-torch/nn/CMinTable.lua +++ /dev/null @@ -1,46 +0,0 @@ -local CMinTable, parent = torch.class('nn.CMinTable', 'nn.Module') - -function CMinTable:__init() - parent.__init(self) - self.gradInput = {} - self.minIdx = torch.Tensor() - self.mask = torch.Tensor() - self.minVals = torch.Tensor() - self.gradMaxVals = torch.Tensor() -end - -function CMinTable:updateOutput(input) - self.output:resizeAs(input[1]):copy(input[1]) - self.minIdx:resizeAs(input[1]):fill(1) - for i=2,#input do - self.maskByteTensor = self.maskByteTensor or - (torch.type(self.output) == 'torch.CudaTensor' and - torch.CudaByteTensor() or torch.ByteTensor()) - self.mask:lt(input[i], self.output) - self.maskByteTensor:resize(self.mask:size()):copy(self.mask) - self.minIdx:maskedFill(self.maskByteTensor, i) - self.minVals:maskedSelect(input[i], self.maskByteTensor) - self.output:maskedCopy(self.maskByteTensor, self.minVals) - end - return self.output -end - -function CMinTable:updateGradInput(input, gradOutput) - for i=1,#input do - self.gradInput[i] = self.gradInput[i] or input[i].new() - self.gradInput[i]:resizeAs(input[i]):fill(0.0) - self.maskByteTensor = self.maskByteTensor or - (torch.type(self.output) == 'torch.CudaTensor' and - torch.CudaByteTensor() or torch.ByteTensor()) - self.mask:eq(self.minIdx, i) - self.maskByteTensor:resize(self.mask:size()):copy(self.mask) - self.gradMaxVals:maskedSelect(gradOutput, 
self.maskByteTensor) - self.gradInput[i]:maskedCopy(self.maskByteTensor, self.gradMaxVals) - end - - for i=#input+1, #self.gradInput do - self.gradInput[i] = nil - end - - return self.gradInput -end diff --git a/contrib/lua-torch/nn/CMul.lua b/contrib/lua-torch/nn/CMul.lua deleted file mode 100644 index 890169761b..0000000000 --- a/contrib/lua-torch/nn/CMul.lua +++ /dev/null @@ -1,166 +0,0 @@ -local CMul, parent = torch.class('nn.CMul', 'nn.Module') - -function CMul:__init(...) - parent.__init(self) - - local arg = {...} - - self.size = torch.LongStorage() - local n = #arg - if n == 1 and torch.type(arg[1]) == 'torch.LongStorage' then - self.size:resize(#arg[1]):copy(arg[1]) - else - self.size:resize(n) - for i=1,n do - self.size[i] = arg[i] - end - end - - self.weight = torch.Tensor(self.size) - self.gradWeight = torch.Tensor(self.size) - - self.output:resize(self.size) - - self:reset() -end - -function CMul:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1./math.sqrt(self.weight:nElement()) - end - self.weight:uniform(-stdv,stdv) -end - -function CMul:updateOutput(input) - -- lazy-initialize - self._output = self._output or input.new() - self._weight = self._weight or input.new() - self._expand = self._expand or input.new() - self._repeat = self._repeat or input.new() - - self.output:resizeAs(input):copy(input) - if input:nElement() == self.weight:nElement() then - self._output:view(self.output, -1) - self._weight:view(self.weight, -1) - - self._output:cmul(self._weight) - else - if self.weight:dim() == input:dim() then - self._output:set(self.output) - self._weight:set(self.weight) - else - local batchSize = input:size(1) - self._output:view(self.output, batchSize, -1) - self._weight:view(self.weight, 1, -1) - end - - self._expand:expandAs(self._weight, self._output) - - if torch.type(input) == 'torch.CudaTensor' then - self._repeat:resizeAs(self._expand):copy(self._expand) - self._output:cmul(self._repeat) - else - self._output:cmul(self._expand) - end - end - - return self.output -end - -function CMul:updateGradInput(input, gradOutput) - if not self.gradInput then - return - end - - self._gradOutput = self._gradOutput or input.new() - self._gradInput = self._gradInput or input.new() - - self.gradInput:resizeAs(input):zero() - if self.weight:nElement() == gradOutput:nElement() then - self.gradInput:addcmul(1, self.weight, gradOutput) - else - if self.weight:dim() == input:dim() then - nn.utils.contiguousView(self._gradOutput, gradOutput, gradOutput:size()) - nn.utils.contiguousView(self._gradInput, self.gradInput, self.gradInput:size()) - self._weight:set(self.weight) - else - local batchSize = input:size(1) - nn.utils.contiguousView(self._gradOutput, gradOutput, batchSize, -1) - nn.utils.contiguousView(self._gradInput, self.gradInput, batchSize, -1) - self._weight:view(self.weight, 1, -1) - end - - self._expand:expandAs(self._weight, self._gradOutput) - - if torch.type(input) == 'torch.CudaTensor' then - self._repeat:resizeAs(self._expand):copy(self._expand) - self._gradInput:addcmul(1, self._repeat, self._gradOutput) - else - self._gradInput:addcmul(1, self._expand, self._gradOutput) - end - end - - return self.gradInput -end - -function CMul:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - - self._input = self._input or input.new() - self._gradWeight = self._gradWeight or input.new() - self._sum = self._sum or input.new() - - if self.weight:nElement() == gradOutput:nElement() then - self.gradWeight:addcmul(scale, input, gradOutput) - else - 
if self.weight:dim() == input:dim() then - nn.utils.contiguousView(self._input, input, input:size()) - nn.utils.contiguousView(self._gradOutput, gradOutput, gradOutput:size()) - self._gradWeight:set(self.gradWeight) - - self._repeat:cmul(self._input, self._gradOutput) - local sumInto = self._sum - local sumFrom = self._repeat - for i=1,self.weight:dim() do - if self.weight:size(i) ~= input:size(i) then - sumInto:sum(sumFrom, i) - sumInto = sumFrom - sumFrom = sumFrom == self._repeat and self._sum or self._repeat - end - end - self._gradWeight:add(scale, sumFrom) - else - local batchSize = input:size(1) - nn.utils.contiguousView(self._input, input, batchSize, -1) - nn.utils.contiguousView(self._gradOutput, gradOutput, batchSize, -1) - self._gradWeight:view(self.gradWeight, 1, -1) - - self._repeat:cmul(self._input, self._gradOutput) - self._sum:sum(self._repeat, 1) - self._gradWeight:add(scale, self._sum) - end - - end -end - -function CMul:type(type, tensorCache) - if type then - self:clearState() - end - return parent.type(self, type, tensorCache) -end - -function CMul:clearState() - nn.utils.clear(self, { - '_input', - '_output', - '_weight', - '_gradWeight', - '_expand', - '_repeat', - '_sum', - }) - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/CMulTable.lua b/contrib/lua-torch/nn/CMulTable.lua deleted file mode 100644 index b47378e831..0000000000 --- a/contrib/lua-torch/nn/CMulTable.lua +++ /dev/null @@ -1,55 +0,0 @@ - -local CMulTable, parent = torch.class('nn.CMulTable', 'nn.Module') - -function CMulTable:__init() - parent.__init(self) - self.gradInput = {} -end - -function CMulTable:updateOutput(input) - self.output:resizeAs(input[1]):copy(input[1]) - for i=2,#input do - self.output:cmul(input[i]) - end - return self.output -end - -function CMulTable:updateGradInput_efficient(input, gradOutput) - self.tout = self.tout or input[1].new() - self.tout:resizeAs(self.output) - for i=1,#input do - self.gradInput[i] = self.gradInput[i] or input[1].new() - self.gradInput[i]:resizeAs(input[i]):copy(gradOutput) - self.tout:copy(self.output):cdiv(input[i]) - self.gradInput[i]:cmul(self.tout) - end - - for i=#input+1, #self.gradInput do - self.gradInput[i] = nil - end - - return self.gradInput -end - -function CMulTable:updateGradInput(input, gradOutput) - for i=1,#input do - self.gradInput[i] = self.gradInput[i] or input[1].new() - self.gradInput[i]:resizeAs(input[i]):copy(gradOutput) - for j=1,#input do - if i~=j then - self.gradInput[i]:cmul(input[j]) - end - end - end - - for i=#input+1, #self.gradInput do - self.gradInput[i] = nil - end - - return self.gradInput -end - -function CMulTable:clearState() - if self.tout then self.tout:set() end - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/CONTRIBUTING.md b/contrib/lua-torch/nn/CONTRIBUTING.md deleted file mode 100644 index cc800154e6..0000000000 --- a/contrib/lua-torch/nn/CONTRIBUTING.md +++ /dev/null @@ -1,136 +0,0 @@ -# Contributing to Torch7 Core (torch7, nn, cutorch, cunn) - -Thanks a lot! There are plenty of ways you can help! - -Please take a moment to review this document in order to make the contribution -process easy and effective for everyone involved. - -Following these guidelines helps to communicate that you respect the time of -the developers managing and developing this open source project. In return, -they should reciprocate that respect in addressing your issue or assessing -patches and features. 
- - ## Using the issue tracker - The [issue tracker](https://github.com/torch/nn/issues) is - the preferred channel for [bug reports](#bugs), [feature requests](#features) - and [submitting pull requests](#pull-requests), but please respect the following - restrictions: - - * Please **do not** use the issue tracker for personal support requests (use the - [mailing-list](http://groups.google.com/forum/#!forum/torch7)). - - * Please **do not** open issues regarding the code in a torch package - outside the core. For example, don't open issues about the - REPL in the nn issue tracker; use the trepl issue tracker for that. - - - ## Bug reports - - A bug is a _demonstrable problem_ that is caused by the code in the repository. - Good bug reports are extremely helpful - thank you! - - Guidelines for bug reports: - - 1. **Use the GitHub issue search** — check if the issue has already been - reported. - - 2. **Check if the issue has been fixed** — try to reproduce it using the - latest `master` or development branch in the repository. - - 3. **Isolate the problem** — ideally create a minimal test case, - preferably within 100 lines of code. - - A good bug report shouldn't leave others needing to chase you up for more - information. Please try to be as detailed as possible in your report. What is - your environment? What steps will reproduce the issue? On what OS do you - experience the problem? What would you expect to be the outcome? All these - details will help people to fix any potential bugs. - - ## Feature requests - - Feature requests are welcome. Torch is community-developed and - the maintainers are not exclusively Torch developers, so keep that in mind. - The purpose of a feature request is to make others who are looking to implement - a feature aware of the interest in it. - - - ## Pull requests - - Good pull requests - patches, improvements, new features - are a fantastic - help. They should remain focused in scope **and avoid containing unrelated - commits.** - - **Please ask first** before embarking on any significant pull request (e.g. - implementing features, refactoring code, porting to a different language), - otherwise you risk spending a lot of time working on something that the - project's developers might not want to merge into the project. - - Please adhere to the coding conventions used throughout the project (indentation, - accurate comments, etc.) and any other requirements (such as test coverage). - - Adhering to the following process is the best way to get your work - included in the project: - - 1. [Fork](https://help.github.com/articles/fork-a-repo) the project, clone your - fork, and configure the remotes: - - ```bash - # Clone your fork of the repo into the current directory - git clone https://github.com//nn.git - # Navigate to the newly cloned directory - cd nn - # Assign the original repo to a remote called "upstream" - git remote add upstream https://github.com/torch/nn.git - ``` - - 2. If you cloned a while ago, get the latest changes from upstream: - - ```bash - git checkout master - git pull upstream master - ``` - - 3. Create a new topic branch (off the main project development branch) to - contain your feature, change, or fix: - - ```bash - git checkout -b - ``` - - 4. Commit your changes in logical chunks. Please try to adhere to these [git commit - message guidelines](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
Use Git's [interactive rebase](https://help.github.com/articles/about-git-rebase) - feature to tidy up your commits before making them public. This helps us keep the - commit history clean and in logical blocks as torch grows. - For example: - - If you are adding a new function or a module, keep the module + tests + doc - in a single commit unless logically warranted. - - If you are fixing a bug, keep the bugfix to a single commit unless logically warranted. - -5. Locally merge (or rebase) the upstream development branch into your topic branch: - - ```bash - git pull [--rebase] upstream master - ``` - -6. Push your topic branch up to your fork: - - ```bash - git push origin - ``` - -7. [Open a Pull Request](https://help.github.com/articles/using-pull-requests/) - with a clear title and description. - -**IMPORTANT**: By submitting a patch, you agree to allow the project owners to - license your work under the terms of the BSD License. - - ## Development workflow tips - - * While you are changing Lua files, you can simply symlink the cloned nn directory to ~/torch/install/share/lua/5.1/nn so that any change is reflected in the current install, without constantly having to run luarocks make rocks/* - * If you are changing C files, run luarocks make rocks/* after every change. - * To test, you can just use: th -lnn -e "nn.test()" diff --git a/contrib/lua-torch/nn/COPYRIGHT.txt b/contrib/lua-torch/nn/COPYRIGHT.txt deleted file mode 100644 index bc002b78ab..0000000000 --- a/contrib/lua-torch/nn/COPYRIGHT.txt +++ /dev/null @@ -1,36 +0,0 @@ -Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) -Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) -Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) -Copyright (c) 2011-2013 NYU (Clement Farabet) -Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) -Copyright (c) 2006 Idiap Research Institute (Samy Bengio) -Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the names of Deepmind Technologies, NYU, NEC Laboratories America - and IDIAP Research Institute nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. diff --git a/contrib/lua-torch/nn/CReLU.lua b/contrib/lua-torch/nn/CReLU.lua deleted file mode 100644 index 8da6e79740..0000000000 --- a/contrib/lua-torch/nn/CReLU.lua +++ /dev/null @@ -1,57 +0,0 @@ -local CReLU, parent = torch.class('nn.CReLU', 'nn.Sequential') - --- Implements the CReLU activation function as described by --- W. Shang et al. in "Understanding and Improving Convolutional Neural Networks --- via Concatenated Rectified Linear Units" -function CReLU:__init(nInputDims, inplace) - parent.__init(self) - self.nInputDims = nInputDims - self.inplace = inplace or false - - local concatTable = nn.ConcatTable() - concatTable:add(nn.Identity()) - concatTable:add(nn.MulConstant(-1)) - self:add(concatTable) - self:add(nn.JoinTable(2)) - self:add(nn.ReLU(self.inplace)) -end - -function CReLU:updateOutput(input) - local input_ - local batched = input:dim() == (self.nInputDims + 1) - if not batched then - input_ = input:view(1, -1) - else - input_ = input:view(input:size(1), -1) - end - parent.updateOutput(self, input_) - local osize = input:size() - if not batched then - osize[1] = osize[1] * 2 - else - osize[2] = osize[2] * 2 - end - self.output:resize(osize) - return self.output -end - -function CReLU:backward(input, gradOutput) - return self:updateGradInput(input, gradOutput) -end - -function CReLU:updateGradInput(input, gradOutput) - local batched = input:dim() == (self.nInputDims + 1) - if not batched then - parent.updateGradInput(self, input:view(1, -1), gradOutput:view(1, -1)) - else - parent.updateGradInput(self, input:view(input:size(1), -1), - gradOutput:view(input:size(1), -1)) - end - - self.gradInput:resizeAs(input) - return self.gradInput -end - -function CReLU:__tostring__() - return "CReLU()" -end diff --git a/contrib/lua-torch/nn/CSubTable.lua b/contrib/lua-torch/nn/CSubTable.lua deleted file mode 100644 index eb7492055a..0000000000 --- a/contrib/lua-torch/nn/CSubTable.lua +++ /dev/null @@ -1,26 +0,0 @@ - -local CSubTable, parent = torch.class('nn.CSubTable', 'nn.Module') - -function CSubTable:__init() - parent.__init(self) - self.gradInput = {} -end - -function CSubTable:updateOutput(input) - self.output:resizeAs(input[1]):copy(input[1]) - self.output:add(-1,input[2]) - return self.output -end - -function CSubTable:updateGradInput(input, gradOutput) - self.gradInput[1] = self.gradInput[1] or input[1].new() - self.gradInput[2] = self.gradInput[2] or input[1].new() - self.gradInput[1]:resizeAs(input[1]):copy(gradOutput) - self.gradInput[2]:resizeAs(input[2]):copy(gradOutput):mul(-1) - - for i=#input+1, #self.gradInput do - self.gradInput[i] = nil - end - - return self.gradInput -end diff --git a/contrib/lua-torch/nn/Clamp.lua b/contrib/lua-torch/nn/Clamp.lua deleted file mode 100644 index 36397a1579..0000000000 --- a/contrib/lua-torch/nn/Clamp.lua +++ /dev/null @@ -1,5 +0,0 @@ -local Clamp, Parent = torch.class('nn.Clamp', 'nn.HardTanh') - -function Clamp:__init(min_value, max_value) - Parent.__init(self, min_value, max_value) -end diff --git 
a/contrib/lua-torch/nn/ClassNLLCriterion.lua b/contrib/lua-torch/nn/ClassNLLCriterion.lua deleted file mode 100644 index dae0e66854..0000000000 --- a/contrib/lua-torch/nn/ClassNLLCriterion.lua +++ /dev/null @@ -1,82 +0,0 @@ -local THNN = require 'nn.THNN' -local ClassNLLCriterion, parent = torch.class('nn.ClassNLLCriterion', 'nn.Criterion') - -function ClassNLLCriterion:__init(weights, sizeAverage, ignoreIndex) - parent.__init(self) - self.sizeAverage = (sizeAverage == nil) and true or sizeAverage - self.ignoreIndex = ignoreIndex or -100 -- this target index will be ignored - if weights then - assert(weights:dim() == 1, "weights input should be 1-D Tensor") - self.weights = weights - end - - self.output_tensor = torch.zeros(1) - self.total_weight_tensor = torch.ones(1) - self.target = torch.zeros(1):long() -end - -function ClassNLLCriterion:__len() - if (self.weights) then - return #self.weights - else - return 0 - end -end - -function ClassNLLCriterion:updateOutput(input, target) - if type(target) == 'number' then - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.target = torch.CudaLongTensor and self.target:cudaLong() or self.target:cuda() - else - self.target = self.target:long() - end - self.target:resize(1) - self.target[1] = target - elseif torch.typename(input):find('torch%.Cuda.*Tensor') then - self.target = torch.CudaLongTensor and target:cudaLong() or target - else - self.target = target:long() - end - - input.THNN.ClassNLLCriterion_updateOutput( - input:cdata(), - self.target:cdata(), - self.output_tensor:cdata(), - self.sizeAverage, - THNN.optionalTensor(self.weights), - self.total_weight_tensor:cdata(), - self.ignoreIndex - ) - self.output = self.output_tensor[1] - return self.output, self.total_weight_tensor[1] -end - -function ClassNLLCriterion:updateGradInput(input, target) - if type(target) == 'number' then - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.target = torch.CudaLongTensor and self.target:cudaLong() or self.target:cuda() - else - self.target = self.target:long() - end - self.target:resize(1) - self.target[1] = target - elseif torch.typename(input):find('torch%.Cuda.*Tensor') then - self.target = torch.CudaLongTensor and target:cudaLong() or target - else - self.target = target:long() - end - - self.gradInput:resizeAs(input):zero() - - input.THNN.ClassNLLCriterion_updateGradInput( - input:cdata(), - self.target:cdata(), - self.gradInput:cdata(), - self.sizeAverage, - THNN.optionalTensor(self.weights), - self.total_weight_tensor:cdata(), - self.ignoreIndex - ) - - return self.gradInput -end diff --git a/contrib/lua-torch/nn/ClassSimplexCriterion.lua b/contrib/lua-torch/nn/ClassSimplexCriterion.lua deleted file mode 100644 index 9cabc011fb..0000000000 --- a/contrib/lua-torch/nn/ClassSimplexCriterion.lua +++ /dev/null @@ -1,118 +0,0 @@ -local ClassSimplexCriterion, parent - = torch.class('nn.ClassSimplexCriterion', 'nn.MSECriterion') - ---[[ - This file implements a criterion for multi-class classification. - It learns an embedding per class, where each class' embedding - is a point on an (N-1)-dimensional simplex, where N is - the number of classes. - For example usage of this class, look at doc/criterion.md - - Reference: http://arxiv.org/abs/1506.08230 - -]]-- - - ---[[ - function regsplex(n): - regsplex returns the coordinates of the vertices of a - regular simplex centered at the origin. - The Euclidean norms of the vectors specifying the vertices are - all equal to 1. 
The input n is the dimension of the vectors; - the simplex has n+1 vertices. - - input: - n -- dimension of the vectors specifying the vertices of the simplex - - output: - a -- tensor dimensioned (n+1,n) whose rows are - vectors specifying the vertices - - reference: - http://en.wikipedia.org/wiki/Simplex#Cartesian_coordinates_for_regular_n-dimensional_simplex_in_Rn ---]] -local function regsplex(n) - local a = torch.zeros(n+1,n) - - for k = 1,n do - -- determine the last nonzero entry in the vector for the k-th vertex - if k==1 then a[k][k] = 1 end - if k>1 then a[k][k] = math.sqrt( 1 - a[{ {k},{1,k-1} }]:norm()^2 ) end - - -- fill the k-th coordinates for the vectors of the remaining vertices - local c = (a[k][k]^2 - 1 - 1/n) / a[k][k] - a[{ {k+1,n+1},{k} }]:fill(c) - end - - return a -end - - -function ClassSimplexCriterion:__init(nClasses) - parent.__init(self) - assert(nClasses and nClasses > 1 and nClasses == (nClasses -(nClasses % 1)), - "Required positive integer argument nClasses > 1") - self.nClasses = nClasses - - -- embedding the simplex in a space of dimension strictly greater than - -- the minimum possible (nClasses-1) is critical for effective training. - local simp = regsplex(nClasses - 1) - self.simplex = torch.cat(simp, - torch.zeros(simp:size(1), nClasses -simp:size(2)), - 2) - self._target = torch.Tensor(nClasses) -end - --- handle target being both 1D tensor, and --- target being 2D tensor (2D tensor means don't do anything) -local function transformTarget(self, target) - if torch.type(target) == 'number' then - self._target:resize(self.nClasses) - self._target:copy(self.simplex[target]) - elseif torch.isTensor(target) then - assert(target:dim() == 1, '1D tensors only!') - local nSamples = target:size(1) - self._target:resize(nSamples, self.nClasses) - for i=1,nSamples do - self._target[i]:copy(self.simplex[target[i]]) - end - end -end - -function ClassSimplexCriterion:updateOutput(input, target) - transformTarget(self, target) - assert(input:nElement() == self._target:nElement()) - self.output_tensor = self.output_tensor or input.new(1) - input.THNN.MSECriterion_updateOutput( - input:cdata(), - self._target:cdata(), - self.output_tensor:cdata(), - self.sizeAverage - ) - self.output = self.output_tensor[1] - return self.output -end - -function ClassSimplexCriterion:updateGradInput(input, target) - assert(input:nElement() == self._target:nElement()) - input.THNN.MSECriterion_updateGradInput( - input:cdata(), - self._target:cdata(), - self.gradInput:cdata(), - self.sizeAverage - ) - return self.gradInput -end - -function ClassSimplexCriterion:getPredictions(input) - if input:dim() == 1 then - input = input:view(1, -1) - end - return torch.mm(input, self.simplex:t()) -end - -function ClassSimplexCriterion:getTopPrediction(input) - local prod = self:getPredictions(input) - local _, maxs = prod:max(prod:nDimension()) - return maxs:view(-1) -end diff --git a/contrib/lua-torch/nn/Collapse.lua b/contrib/lua-torch/nn/Collapse.lua deleted file mode 100644 index a088608ca0..0000000000 --- a/contrib/lua-torch/nn/Collapse.lua +++ /dev/null @@ -1,30 +0,0 @@ -local Collapse, parent = torch.class('nn.Collapse', 'nn.Module') - --- collapses non-batch dims -function Collapse:__init(nInputDim) - parent.__init(self) - self.nInputDim = nInputDim -end - -function Collapse:updateOutput(input) - if not input:isContiguous() then - self._input = self._input or input.new() - self._input:resize(input:size()):copy(input) - input = self._input - end - if input:dim() > self.nInputDim then - 
self.output:view(input,input:size(1),-1) - else - self.output:view(input,-1) - end - return self.output -end - -function Collapse:updateGradInput(input, gradOutput) - self.gradInput:view(gradOutput, input:size()) - return self.gradInput -end - -function Collapse:clearState() - self._input = nil -end diff --git a/contrib/lua-torch/nn/Concat.lua b/contrib/lua-torch/nn/Concat.lua deleted file mode 100644 index d7e3ee711f..0000000000 --- a/contrib/lua-torch/nn/Concat.lua +++ /dev/null @@ -1,158 +0,0 @@ -local Concat, parent = torch.class('nn.Concat', 'nn.Container') - -function Concat:__init(dimension) - parent.__init(self) - self.outputSize = torch.LongStorage() - self.dimension = dimension -end - -function Concat:updateOutput(input) - self.outputSize = self.outputSize or torch.LongStorage() - - local outs = {} - for i=1,#self.modules do - local currentOutput = self:rethrowErrors(self.modules[i], i, 'updateOutput', input) - outs[i] = currentOutput - if i == 1 then - self.outputSize:resize(currentOutput:dim()):copy(currentOutput:size()) - else - self.outputSize[self.dimension] = self.outputSize[self.dimension] + currentOutput:size(self.dimension) - end - end - self.output:resize(self.outputSize) - - local offset = 1 - for i,module in ipairs(self.modules) do - local currentOutput = outs[i] - self.output:narrow(self.dimension, offset, currentOutput:size(self.dimension)):copy(currentOutput) - offset = offset + currentOutput:size(self.dimension) - end - return self.output -end - -local function retable(t1, t2, f) - for k, v in ipairs(t2) do - if (torch.type(v) == "table") then - t1[k] = retable(t1[k] or {}, t2[k], f) - else - f(t1, k, v) - end - end - for i=#t2+1, #t1 do - t1[i] = nil - end - return t1 -end - -local function backward(self, method, input, gradOutput, scale) - local isTable = torch.type(input) == 'table' - local wasTable = torch.type(self.gradInput) == 'table' - scale = scale or 1 - - if isTable then - local offset = 1 - for i,module in ipairs(self.modules) do - local currentOutput = module.output - local currentGradInput = self:rethrowErrors(module, i, method, input, - gradOutput:narrow(self.dimension, offset, currentOutput:size(self.dimension)), scale) - if torch.type(currentGradInput) ~= 'table' then - error"currentGradInput is not a table!" - end - if #input ~= #currentGradInput then - error("table size mismatch: "..#input.." ~= "..#currentGradInput) - end - if i == 1 then - self.gradInput = wasTable and self.gradInput or {} - retable(self.gradInput, currentGradInput, - function(t, k, v) - t[k] = t[k] or v:clone() - t[k]:resizeAs(v) - t[k]:copy(v) - end - ) - else - retable(self.gradInput, currentGradInput, - function(t, k, v) - if t[k] then - t[k]:add(v) - else - t[k] = v:clone() - end - end - ) - end - offset = offset + currentOutput:size(self.dimension) - end - else - self.gradInput = (not wasTable) and self.gradInput:resizeAs(input) or input:clone() - local offset = 1 - for i,module in ipairs(self.modules) do - local currentOutput = module.output - local currentGradInput = self:rethrowErrors(module, i, method, input, - gradOutput:narrow(self.dimension, offset, currentOutput:size(self.dimension)), scale) - if currentGradInput then -- if the module does not produce a gradInput (for example first layer), then ignore it and move on. 
- if i==1 then - self.gradInput:copy(currentGradInput) - else - self.gradInput:add(currentGradInput) - end - end - offset = offset + currentOutput:size(self.dimension) - end - end - return self.gradInput -end - -function Concat:updateGradInput(input, gradOutput) - return backward(self, 'updateGradInput', input, gradOutput) -end - -function Concat:backward(input, gradOutput, scale) - return backward(self, 'backward', input, gradOutput, scale) -end - -function Concat:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - local offset = 1 - for i,module in ipairs(self.modules) do - local currentOutput = module.output - self:rethrowErrors(module, i, 'accGradParameters', input, - gradOutput:narrow(self.dimension, offset, currentOutput:size(self.dimension)), - scale) - offset = offset + currentOutput:size(self.dimension) - end -end - -function Concat:accUpdateGradParameters(input, gradOutput, lr) - local offset = 1 - for i,module in ipairs(self.modules) do - local currentOutput = module.output - self:rethrowErrors(module, i, 'accUpdateGradParameters', - input, - gradOutput:narrow(self.dimension, offset, currentOutput:size(self.dimension)), - lr) - offset = offset + currentOutput:size(self.dimension) - end -end - -function Concat:__tostring__() - local tab = ' ' - local line = '\n' - local next = ' |`-> ' - local lastNext = ' `-> ' - local ext = ' | ' - local extlast = ' ' - local last = ' ... -> ' - local str = torch.type(self) - str = str .. ' {' .. line .. tab .. 'input' - for i=1,#self.modules do - if i == #self.modules then - str = str .. line .. tab .. lastNext .. '(' .. i .. '): ' .. tostring(self.modules[i]):gsub(line, line .. tab .. extlast) - else - str = str .. line .. tab .. next .. '(' .. i .. '): ' .. tostring(self.modules[i]):gsub(line, line .. tab .. ext) - end - end - str = str .. line .. tab .. last .. 'output' - str = str .. line .. '}' - return str -end diff --git a/contrib/lua-torch/nn/ConcatTable.lua b/contrib/lua-torch/nn/ConcatTable.lua deleted file mode 100644 index 7427193442..0000000000 --- a/contrib/lua-torch/nn/ConcatTable.lua +++ /dev/null @@ -1,118 +0,0 @@ -local ConcatTable, parent = torch.class('nn.ConcatTable', 'nn.Container') - -function ConcatTable:__init() - parent.__init(self) - self.modules = {} - self.output = {} -end - -function ConcatTable:updateOutput(input) - for i=1,#self.modules do - self.output[i] = self:rethrowErrors(self.modules[i], i, 'updateOutput', input) - end - return self.output -end - -local function retable(t1, t2, f) - for k, v in ipairs(t2) do - if (torch.type(v) == "table") then - t1[k] = retable(t1[k] or {}, t2[k], f) - else - f(t1, k, v) - end - end - for i=#t2+1, #t1 do - t1[i] = nil - end - return t1 -end - -local function backward(self, method, input, gradOutput, scale) - local isTable = torch.type(input) == 'table' - local wasTable = torch.type(self.gradInput) == 'table' - if isTable then - for i,module in ipairs(self.modules) do - local currentGradInput = self:rethrowErrors(module, i, method, input, gradOutput[i], scale) - if torch.type(currentGradInput) ~= 'table' then - error"currentGradInput is not a table!" - end - if #input ~= #currentGradInput then - error("table size mismatch: "..#input.." 
~= "..#currentGradInput) - end - if i == 1 then - self.gradInput = wasTable and self.gradInput or {} - retable(self.gradInput, currentGradInput, - function(t, k, v) - t[k] = t[k] or v:clone() - t[k]:resize(v:size()) - t[k]:copy(v) - end - ) - else - retable(self.gradInput, currentGradInput, - function(t, k, v) - if t[k] then - t[k]:add(v) - else - t[k] = v:clone() - end - end - ) - end - end - else - self.gradInput = (not wasTable) and self.gradInput or input:clone() - for i,module in ipairs(self.modules) do - local currentGradInput = self:rethrowErrors(module, i, method, input, gradOutput[i], scale) - if i == 1 then - self.gradInput:resize(currentGradInput:size()):copy(currentGradInput) - else - self.gradInput:add(currentGradInput) - end - end - end - return self.gradInput -end - -function ConcatTable:updateGradInput(input, gradOutput) - return backward(self, 'updateGradInput', input, gradOutput) -end - -function ConcatTable:backward(input, gradOutput, scale) - return backward(self, 'backward', input, gradOutput, scale) -end - -function ConcatTable:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - for i,module in ipairs(self.modules) do - self:rethrowErrors(module, i, 'accGradParameters', input, gradOutput[i], scale) - end -end - -function ConcatTable:accUpdateGradParameters(input, gradOutput, lr) - for i,module in ipairs(self.modules) do - self:rethrowErrors(module, i, 'accUpdateGradParameters', input, gradOutput[i], lr) - end -end - -function ConcatTable:__tostring__() - local tab = ' ' - local line = '\n' - local next = ' |`-> ' - local lastNext = ' `-> ' - local ext = ' | ' - local extlast = ' ' - local last = ' ... -> ' - local str = torch.type(self) - str = str .. ' {' .. line .. tab .. 'input' - for i=1,#self.modules do - if i == #self.modules then - str = str .. line .. tab .. lastNext .. '(' .. i .. '): ' .. tostring(self.modules[i]):gsub(line, line .. tab .. extlast) - else - str = str .. line .. tab .. next .. '(' .. i .. '): ' .. tostring(self.modules[i]):gsub(line, line .. tab .. ext) - end - end - str = str .. line .. tab .. last .. 'output' - str = str .. line .. '}' - return str -end diff --git a/contrib/lua-torch/nn/Constant.lua b/contrib/lua-torch/nn/Constant.lua deleted file mode 100644 index 07773feb22..0000000000 --- a/contrib/lua-torch/nn/Constant.lua +++ /dev/null @@ -1,36 +0,0 @@ ------------------------------------------------------------------------- ---[[ Constant ]]-- --- Outputs a constant value given an input. --- If nInputDim is specified, uses the input to determine the size of --- the batch. The value is then replicated over the batch. --- You can use this with nn.ConcatTable() to append constant inputs to --- an input : nn.ConcatTable():add(nn.Constant(v)):add(nn.Identity()) . 
------------------------------------------------------------------------- -local Constant, parent = torch.class("nn.Constant", "nn.Module") - -function Constant:__init(value, nInputDim) - self.value = value - if torch.type(self.value) == 'number' then - self.value = torch.Tensor{self.value} - end - assert(torch.isTensor(self.value), "Expecting number or tensor at arg 1") - self.nInputDim = nInputDim - parent.__init(self) -end - -function Constant:updateOutput(input) - if self.nInputDim and input:dim() > self.nInputDim then - local vsize = self.value:size():totable() - self.output:resize(input:size(1), table.unpack(vsize)) - local value = self.value:view(1, table.unpack(vsize)) - self.output:copy(value:expand(self.output:size())) - else - self.output:resize(self.value:size()):copy(self.value) - end - return self.output -end - -function Constant:updateGradInput(input, gradOutput) - self.gradInput:resizeAs(input):zero() - return self.gradInput -end diff --git a/contrib/lua-torch/nn/Container.lua b/contrib/lua-torch/nn/Container.lua deleted file mode 100644 index 7e264bab90..0000000000 --- a/contrib/lua-torch/nn/Container.lua +++ /dev/null @@ -1,149 +0,0 @@ --- This is code common to container modules, which are collections of --- smaller constituent modules like Parallel, Sequential, etc. -local Container, parent = torch.class('nn.Container', 'nn.Module') - -function Container:__init(...) - parent.__init(self, ...) - self.modules = {} -end - -function Container:add(module) - table.insert(self.modules, module) - return self -end - -function Container:get(index) - return self.modules[index] -end - -function Container:size() - return #self.modules -end - --- Check if passing arguments through xpcall is supported in this Lua interpreter. -local _, XPCALL_ARGS = xpcall(function(x) return x ~= nil end, function() end, 1) -local TRACEBACK_WARNING = "WARNING: If you see a stack trace below, it doesn't point to the place where this error occurred. Please use only the one above." --- module argument can be retrieved with moduleIndex, but code is cleaner when --- it has to be specified anyway. -function Container:rethrowErrors(module, moduleIndex, funcName, ...) - assert(module == self.modules[moduleIndex], - "mismatch between moduleIndex and self.modules in rethrowErrors") - local function handleError(err) - -- This will be executed only in the first container that handles the error. - if not err:find(TRACEBACK_WARNING) then - local traceback = debug.traceback() - -- Remove this handler from the stack - local _, first_line_end = traceback:find('^.-\n') - local _, second_line_end = traceback:find('^.-\n.-\n') - traceback = traceback:sub(1, first_line_end) .. traceback:sub(second_line_end+1) - err = err .. '\n' .. traceback .. '\n\n' .. TRACEBACK_WARNING - else - -- Remove file path - err = err:sub(err:find('\n')+1) - end - local msg = string.format('In %d module of %s:', - moduleIndex, torch.type(self)) - -- Preceding newline has to be here, because Lua will prepend a file path. - err = '\n' .. msg .. '\n' .. err - return err - end - - -- Lua 5.1 doesn't support passing arguments through xpcall, so they have to - -- be passed via a closure. This incurs some overhead, so it's better not to - -- make it the default. - local ok, ret, noret - if not XPCALL_ARGS then - local args = {...} - local unpack = unpack or table.unpack - ok, ret, noret = xpcall(function() - return module[funcName](module, unpack(args)) - end, - handleError) - else - ok, ret, noret = xpcall(module[funcName], handleError, module, ...) 
- end - assert(noret == nil, "rethrowErrors supports only one return argument") - - if not ok then error(ret) end - return ret -end - -function Container:applyToModules(func) - for _, module in ipairs(self.modules) do - func(module) - end -end - -function Container:zeroGradParameters() - self:applyToModules(function(module) module:zeroGradParameters() end) -end - -function Container:updateParameters(learningRate) - self:applyToModules(function(module) module:updateParameters(learningRate) end) -end - -function Container:training() - self:applyToModules(function(module) module:training() end) - parent.training(self) -end - -function Container:evaluate() - self:applyToModules(function(module) module:evaluate() end) - parent.evaluate(self) -end - -function Container:share(mlp, ...) - for i=1,#self.modules do - self.modules[i]:share(mlp.modules[i], ...); - end - return self -end - -function Container:reset(stdv) - self:applyToModules(function(module) module:reset(stdv) end) -end - -function Container:parameters() - local function tinsert(to, from) - if type(from) == 'table' then - for i=1,#from do - tinsert(to,from[i]) - end - else - table.insert(to,from) - end - end - local w = {} - local gw = {} - for i=1,#self.modules do - local mw,mgw = self.modules[i]:parameters() - if mw then - tinsert(w,mw) - tinsert(gw,mgw) - end - end - return w,gw -end - -function Container:clearState() - -- don't call set because it might reset referenced tensors - local function clear(f) - if self[f] then - if torch.isTensor(self[f]) then - self[f] = self[f].new() - elseif type(self[f]) == 'table' then - self[f] = {} - else - self[f] = nil - end - end - end - clear('output') - clear('gradInput') - if self.modules then - for i,module in pairs(self.modules) do - module:clearState() - end - end - return self -end diff --git a/contrib/lua-torch/nn/Contiguous.lua b/contrib/lua-torch/nn/Contiguous.lua deleted file mode 100755 index f9974ce5ae..0000000000 --- a/contrib/lua-torch/nn/Contiguous.lua +++ /dev/null @@ -1,21 +0,0 @@ -local Contiguous, parent = torch.class('nn.Contiguous', 'nn.Module') - -function Contiguous:updateOutput(input) - if not input:isContiguous() then - if self.output:storage() == input:storage() then self.output:set() end - self.output:resizeAs(input):copy(input) - else - self.output:set(input) - end - return self.output -end - -function Contiguous:updateGradInput(input, gradOutput) - if not gradOutput:isContiguous() then - if self.gradInput:storage() == gradOutput:storage() then self.gradInput:set() end - self.gradInput:resizeAs(gradOutput):copy(gradOutput) - else - self.gradInput:set(gradOutput) - end - return self.gradInput -end diff --git a/contrib/lua-torch/nn/Convert.lua b/contrib/lua-torch/nn/Convert.lua deleted file mode 100644 index 855338dd67..0000000000 --- a/contrib/lua-torch/nn/Convert.lua +++ /dev/null @@ -1,245 +0,0 @@ ------------------------------------------------------------------------- ---[[ nn.Convert ]]-- --- Module to convert between different data formats --- nn.Convert('bchw', 'bf') or nn.Convert('chw', 'f') --- Automatically converts input to same type as self.output --- Simplest use is for automatic input type conversions : nn.Convert() ------------------------------------------------------------------------- -local _ = require 'moses' -local Convert, parent = torch.class("nn.Convert", "nn.Container") - -function Convert:__init(inputShape, outputShape) - if outputShape and not inputShape then - error"Expecting non-nil arg 1 when arg 2 is provided" - end - inputShape =
inputShape or 'b*' - outputShape = outputShape or inputShape - self.inputShape = inputShape:find('b') and inputShape or ('b'..inputShape) - self.outputShape = outputShape:find('b') and outputShape or ('b'..outputShape) - self.inputBatchDim = self.inputShape:find('b') - self.outputBatchDim = self.outputShape:find('b') - if self.inputShape == 'b*' or self.outputShape == 'b*' then - assert(self.inputShape == 'b*' and self.outputShape == 'b*', 'Both or neither shapes must be b*') - self.nInputDim = -1 - self.nOutputDim = -1 - self.transposition = true - else - -- number of dims in batch mode - self.nInputDim = #self.inputShape - self.nOutputDim = #self.outputShape - -- is the outputShape just a transposition of the inputShape? - if self.nInputDim == self.nOutputDim then - self.transposition = true - for i=1,self.nInputDim do - if not self.outputShape:find(self.inputShape:sub(i,i)) then - self.transposition = false - break - end - end - end - end - parent.__init(self) -end - --- post-initialization -function Convert:buildConverter(input) - if self.transposition then - self.converter = self:transpose(self.outputShape) - else - if (torch.type(self[self.outputShape]) ~= 'function') then - error(string.format("Unrecognized conversion of shape %s to %s", self.inputShape, self.outputShape)) - end - self.converter = self[self.outputShape](self, input) - end - assert(torch.isTensor(self.output), "Expecting Tensor output") - - self.converter:type(torch.type(self.output)) - - self.modules[1] = self.converter -end - -function Convert:updateOutput(input) - assert(torch.isTensor(input), "expecting Tensor") - if not torch.isTypeOf(input, torch.type(self.output)) then - -- handle different input type - self._input = self._input or self.output.new() - self._input:resize(input:size()):copy(input) - input = self._input - end - self.batchMode = true - if input:dim() < self.nInputDim then - -- handle non-batch mode - local inputSize = input:size():totable() - table.insert(inputSize, self.inputBatchDim, 1) - self.__input = self.__input or input.new() - self.__input:set(input):resize(table.unpack(inputSize)) - input = self.__input - self.batchMode = false - end - if not self.converter then - self:buildConverter(input) - end - - self.output = self.converter:updateOutput(input) - - if not self.batchMode then - local outputSize = self.output:size():totable() - table.remove(outputSize, self.outputBatchDim) - self.__output = self.__output or self.output.new() - self.__output:set(self.output):resize(table.unpack(outputSize)) - self.output = self.__output - end - return self.output -end - -function Convert:updateGradInput(input, gradOutput) - local input_ = input - input = self._input or input - if not self.batchMode then - input = self.__input - self.__gradOutput = self.__gradOutput or gradOutput.new() - self.__gradOutput:set(gradOutput):resize(self.converter.output:size()) - gradOutput = self.__gradOutput - end - - local gradInput = self.converter:updateGradInput(input, gradOutput) - - if not self.batchMode then - self.__gradInput = self.__gradInput or gradInput.new() - self.__gradInput:set(gradInput):resize(input_:size()) - gradInput = self.__gradInput - end - if self._input then - self._gradInput = self._gradInput or input.new() - self._gradInput:resize(input:size()):copy(gradInput) - self.gradInput = self._gradInput - else - self.gradInput = gradInput - end - - return self.gradInput -end - -function Convert:accGradParameters(input, gradOutput, scale) - input = self.batchMode and self.__input or self._input or input - 
gradOutput = self.batchMode and self.__gradOutput or gradOutput - self.converter:accGradParameters(input, gradOutput, scale) -end - -function Convert:accUpdateGradParameters(input, gradOutput, lr) - input = self.batchMode and self.__input or self._input or input - gradOutput = self.batchMode and self.__gradOutput or gradOutput - self.converter:accUpdateGradParameters(input, gradOutput, lr) -end - --- batch feature -function Convert:bf(input) - local b_pos = self:findAxis('b', self.inputShape) - local dim = #self.inputShape - if self.inputShape == 'bt' then - error"Conversion of shape bt to bf not supported: open an issue on github" - end - -- was b - if dim == 1 then - return nn.Reshape(1) - end - -- was b... - local modula - if b_pos ~= 1 then - modula = nn.Transpose({1, b_pos}) - end - if dim > 2 then - local transpose = modula - local sampleSize = input:select(self:findAxis('b'),1):nElement() - local reshape = nn.Reshape(sampleSize) - if transpose then - modula = nn.Sequential() - modula:add(transpose) - modula:add(reshape) - else - modula = reshape - end - end - return modula or nn.Identity() -end - --- each example is a scalar; batch is a vector -function Convert:b(input) - local b_pos = self:findAxis('b') - if self.inputShape == 'bt' or self.inputShape == 'tb' then - local t_pos = self:findAxis('t') - -- select first set of classes - return nn.Select(t_pos, 1) - elseif self.inputShape == 'bf' or self.inputShape == 'fb' then - -- this wont work as expected with size(f) > 1 - local f_pos = self:findAxis('f') - if input:size(f_pos) > 1 then - error("Cannot convert shape "..self.inputShape.." to b when feature > 1") - end - return nn.Select(f_pos, 1) - else - error("Cannot convert shape "..self.inputShape.." to shape b") - end -end - --- returns the current shape of the data -function Convert:default() - return nn.Identity() -end - --- multi-class (batch target) -function Convert:bt() - local b_pos = self:findAxis('b') - local modula - if self.inputShape == 'b' then - modula = nn.Reshape(1) - else - error("cannot convert shape '"..self.inputShape.."' to bt") - end - return modula -end - --- a generic function for transposing shape axes -function Convert:transpose(newShape) - if newShape == self.inputShape then - return nn.Identity() - end - local inputShape = {} - for i=1,#self.inputShape do - table.insert(inputShape, self.inputShape:sub(i,i)) - end - local transpositions = {} - for i=1,#newShape do - local j = _.indexOf(inputShape, newShape:sub(i,i)) - if i ~= j then - local char = inputShape[i] - inputShape[i] = inputShape[j] - inputShape[j] = char - table.insert(transpositions, {j, i}) - end - end - return nn.Transpose(table.unpack(transpositions)) -end - -function Convert:findAxis(axis_char, shape, silent) - shape = shape or self.inputShape - local axis_pos = shape:find(axis_char) - if (not silent) and (not axis_pos) then - error("Provided shape '"..shape.."' has no axis '"..axis_char.."'", 2) - end - return axis_pos -end - -function Convert:clearState() - self._input = nil - self._gradInput = nil - self.__input = nil - self.__output = nil - self.__gradInput = nil - self.__gradOutput = nil -end - -function Convert:type(type) - self:clearState() - return parent.type(self, type) -end diff --git a/contrib/lua-torch/nn/Copy.lua b/contrib/lua-torch/nn/Copy.lua deleted file mode 100644 index 9f83cf9b44..0000000000 --- a/contrib/lua-torch/nn/Copy.lua +++ /dev/null @@ -1,42 +0,0 @@ -local Copy, parent = torch.class('nn.Copy', 'nn.Module') - -function Copy:__init(intype, outtype, forceCopy, 
dontCast) - intype = intype or torch.Tensor.__typename - outtype = outtype or torch.Tensor.__typename - - self.dontCast = dontCast - - parent.__init(self) - self.gradInput = torch.getmetatable(intype).new() - self.output = torch.getmetatable(outtype).new() - - if (not forceCopy) and intype == outtype then - - self.updateOutput = function(self, input) - self.output:set(input) - return input - end - - self.updateGradInput = function(self, input, gradOutput) - self.gradInput:set(gradOutput) - return gradOutput - end - end -end - -function Copy:updateOutput(input) - self.output:resize(input:size()):copy(input) - return self.output -end - -function Copy:updateGradInput(input, gradOutput) - self.gradInput:resize(gradOutput:size()):copy(gradOutput) - return self.gradInput -end - -function Copy:type(type, tensorCache) - if type and self.dontCast then - return self - end - return parent.type(self, type, tensorCache) -end diff --git a/contrib/lua-torch/nn/Cosine.lua b/contrib/lua-torch/nn/Cosine.lua deleted file mode 100644 index 19a9cba823..0000000000 --- a/contrib/lua-torch/nn/Cosine.lua +++ /dev/null @@ -1,175 +0,0 @@ -local Cosine, parent = torch.class('nn.Cosine', 'nn.Module') - -function Cosine:__init(inputSize,outputSize) - parent.__init(self) - - self.weight = torch.Tensor(outputSize,inputSize) - self.gradWeight = torch.Tensor(outputSize,inputSize) - - self:reset() -end - -function Cosine:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1./math.sqrt(self.weight:size(1)) - end - self.weight:uniform(-stdv, stdv) -end - -function Cosine:updateOutput(input) - local inputSize = self.weight:size(2) - local outputSize = self.weight:size(1) - - self._weightNorm = self._weightNorm or self.weight.new() - self._inputNorm = self._inputNorm or self.weight.new() - - -- y_j = (w_j * x) / ( || w_j || * || x || ) - - self._weightNorm:norm(self.weight,2,2):add(1e-12) - if input:dim() == 1 then - self.output:resize(outputSize):zero() - self.output:addmv(1, self.weight, input) - self.__norm = input:norm()+1e-12 - self.output:cdiv(self._weightNorm:view(outputSize)):div(self.__norm) - elseif input:dim() == 2 then - local batchSize = input:size(1) - local nElement = self.output:nElement() - self.output:resize(batchSize, outputSize) - if self.output:nElement() ~= nElement then - self.output:zero() - end - self.output:addmm(0, self.output, 1, input, self.weight:t()) - - self._inputNorm:norm(input,2,2):add(1e-12) - self.output:cdiv(self._weightNorm:view(1,outputSize):expandAs(self.output)) - self.output:cdiv(self._inputNorm:expandAs(self.output)) - else - error('input must be vector or matrix') - end - - return self.output -end - -function Cosine:updateGradInput(input, gradOutput) - if not self.gradInput then - return - end - - local inputSize = self.weight:size(2) - local outputSize = self.weight:size(1) - - --[[ - dy_j w_ji x_i - ---- = ------------------- - y_j --------- - dx_i || w_j || * || x || || x ||^2 - --]] - - local nElement = self.gradInput:nElement() - self.gradInput:resizeAs(input) - if self.gradInput:nElement() ~= nElement then - self.gradInput:zero() - end - - if input:dim() == 1 then - self._weight = self._weight or input.new() - self._weight:resizeAs(self.weight):copy(self.weight) - self._weight:cdiv(self._weightNorm:expandAs(self.weight)) - self._weight:div(self.__norm) - self._weight:addr(1, self._weight, -1/(self.__norm*self.__norm), self.output, input) - self.gradInput:addmv(0, 1, self._weight:t(), gradOutput) - elseif input:dim() == 2 then - local inputNorm = 
self._inputNorm:expandAs(input) - local weightNorm = self._weightNorm:view(1,outputSize):expandAs(gradOutput) - - self.gradInput:copy(input):cdiv(inputNorm) - self._gradOutput = self._gradOutput or gradOutput.new() - self._gradOutput:resizeAs(gradOutput):copy(gradOutput) - self._gradOutput:cmul(self.output) - self._sum = self._sum or input.new() - self._sum:sum(self._gradOutput, 2) - self.gradInput:cmul(self._sum:expandAs(input)) - - self._gradOutput:resizeAs(gradOutput):copy(gradOutput) - self._gradOutput:cdiv(weightNorm) - self.gradInput:addmm(-1, self.gradInput, 1, self._gradOutput, self.weight) - - self.gradInput:cdiv(inputNorm) - end - - return self.gradInput -end - -function Cosine:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - local inputSize = self.weight:size(2) - local outputSize = self.weight:size(1) - - --[[ - dy_j x_i w_ji - ----- = ------------------- - y_j ----------- - dw_ji || w_j || * || x || || w_j ||^2 - --]] - - if input:dim() == 1 then - self._gradOutput = self._gradOutput or gradOutput.new() - self._gradOutput:resizeAs(gradOutput):copy(gradOutput) - local weightNorm = self._weightNorm:view(outputSize) - self._gradOutput:cdiv(weightNorm) - self.gradWeight:addr(scale/self.__norm, self._gradOutput, input) - - self._gradOutput:cdiv(weightNorm) - self._gradOutput:cmul(self.output) - self._weight = self._weight or self.weight.new() - self._weight:resizeAs(self.weight):copy(self.weight) - self._weight:cmul(self._gradOutput:view(outputSize, 1):expandAs(self.weight)) - self.gradWeight:add(-1, self._weight) - elseif input:dim() == 2 then - self._weight = self._weight or self.weight.new() - self._weight:resizeAs(self.weight):copy(self.weight) - self._gradOutput = self._gradOutput or gradOutput.new() - self._gradOutput:resizeAs(gradOutput):copy(gradOutput) - self._gradOutput:cmul(self.output) - self._sum = self._sum or input.new() - self._sum:sum(self._gradOutput, 1) - local grad = self._sum[1] - grad:cdiv(self._weightNorm:select(2,1)) - self._weight:cmul(grad:view(outputSize,1):expandAs(self._weight)) - - local input_ = self._gradOutput - input_:resizeAs(input):copy(input) - input_:cdiv(self._inputNorm:expandAs(input)) - self._weight:addmm(-1, self._weight, 1, gradOutput:t(), input_) - - self._weight:cdiv(self._weightNorm:expandAs(self._weight)) - self.gradWeight:add(self._weight) - else - error"1D or 2D input expected" - end -end - -function Cosine:type(type, tensorCache) - if type then - -- prevent premature memory allocations - self._input = nil - self._weight = nil - self._inputNorm = nil - self._weightNorm = nil - self._gradOutput = nil - self._sum = nil - end - return parent.type(self, type, tensorCache) -end - -function Cosine:clearState() - nn.utils.clear(self, { - '_input', - '_weight', - '_gradOutput', - '_sum', - '_inputNorm', - '_weightNorm', - }) - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/CosineDistance.lua b/contrib/lua-torch/nn/CosineDistance.lua deleted file mode 100644 index fe4e4b9f52..0000000000 --- a/contrib/lua-torch/nn/CosineDistance.lua +++ /dev/null @@ -1,116 +0,0 @@ -local CosineDistance, parent = torch.class('nn.CosineDistance', 'nn.Module') - -function CosineDistance:__init() - parent.__init(self) - self.gradInput = {torch.Tensor(), torch.Tensor()} -end - -local function makeContiguous(self, input1, input2) - if not input1:isContiguous() then - self._input1 = self._input1 or input1.new() - self._input1:resizeAs(input1):copy(input1) - input1 = self._input1 - end - if not input2:isContiguous() then -
self._input2 = self._input2 or input2.new() - self._input2:resizeAs(input2):copy(input2) - input2 = self._input2 - end - return input1, input2 -end - -function CosineDistance:updateOutput(input) - local input1, input2 = input[1], input[2] - - input1, input2 = makeContiguous(self, input1, input2) - - if input1:dim() == 1 then - input1 = input1:view(1,-1) - input2 = input2:view(1,-1) - end - - if not self.buffer then - self.buffer = input1.new() - self.w1 = input1.new() - self.w22 = input1.new() - self.w = input1.new() - self.w32 = input1.new() - self.ones = input1.new() - end - - self.buffer:cmul(input1,input2) - self.w1:sum(self.buffer,2) - - local epsilon = 1e-12 - self.buffer:cmul(input1,input1) - self.w22:sum(self.buffer,2):add(epsilon) - self.ones:resizeAs(self.w22):fill(1) - self.w22:cdiv(self.ones, self.w22) - self.w:resizeAs(self.w22):copy(self.w22) - - self.buffer:cmul(input2,input2) - self.w32:sum(self.buffer,2):add(epsilon) - self.w32:cdiv(self.ones, self.w32) - self.w:cmul(self.w32) - self.w:sqrt() - - self.output:cmul(self.w1,self.w) - self.output:resize(input1:size(1)) - - return self.output -end - -function CosineDistance:updateGradInput(input, gradOutput) - local v1 = input[1] - local v2 = input[2] - local not_batch = false - - v1, v2 = makeContiguous(self, v1, v2) - - if v1:dim() == 1 then - v1 = v1:view(1,-1) - v2 = v2:view(1,-1) - not_batch = true - end - - if #self.gradInput ~= 2 then - self.gradInput[1] = self.gradInput[1] or v1.new() - self.gradInput[2] = self.gradInput[2] or v1.new() - end - - local gw1 = self.gradInput[1] - local gw2 = self.gradInput[2] - gw1:resizeAs(v1):copy(v2) - gw2:resizeAs(v1):copy(v1) - - self.buffer:cmul(self.w1,self.w22) - gw1:addcmul(-1,self.buffer:expandAs(v1),v1) - gw1:cmul(self.w:expandAs(v1)) - - self.buffer:cmul(self.w1,self.w32) - gw2:addcmul(-1,self.buffer:expandAs(v1),v2) - gw2:cmul(self.w:expandAs(v1)) - - local go = gradOutput:view(-1,1):expandAs(v1) - gw1:cmul(go) - gw2:cmul(go) - - if not_batch then - self.gradInput[1]:resize(gw1:size(2)) - self.gradInput[2]:resize(gw2:size(2)) - end - - return self.gradInput -end - -function CosineDistance:clearState() - nn.utils.clear(self, { - 'buffer', - 'w1', - 'w22', - 'w', - 'w32', - 'ones', - }) - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/CosineEmbeddingCriterion.lua b/contrib/lua-torch/nn/CosineEmbeddingCriterion.lua deleted file mode 100644 index d55e031309..0000000000 --- a/contrib/lua-torch/nn/CosineEmbeddingCriterion.lua +++ /dev/null @@ -1,142 +0,0 @@ -local CosineEmbeddingCriterion, parent = torch.class('nn.CosineEmbeddingCriterion', 'nn.Criterion') - -function CosineEmbeddingCriterion:__init(margin) - parent.__init(self) - margin = margin or 0 - self.margin = margin - self.gradInput = {torch.Tensor(), torch.Tensor()} - self.sizeAverage = true -end - -function CosineEmbeddingCriterion:updateOutput(input,y) - - local input1, input2 = input[1], input[2] - - -- keep backward compatibility - if type(y) == 'number' then - self._y = self._y or input1.new(1) - self._y[1] = y - y = self._y - end - - if input1:dim() == 1 then - input1 = input1:view(1,-1) - input2 = input2:view(1,-1) - end - - if not self.buffer then - self.buffer = input1.new() - self.w1 = input1.new() - self.w22 = input1.new() - self.w = input1.new() - self.w32 = input1.new() - self._outputs = input1.new() - -- comparison operators behave differently from cuda/c implementations - if input1:type() == 'torch.CudaTensor' then - self._idx = input1.new() - else - self._idx = torch.ByteTensor() - end 
- end - - self.buffer:cmul(input1,input2) - self.w1:sum(self.buffer,2) - - local epsilon = 1e-12 - self.buffer:cmul(input1,input1) - self.w22:sum(self.buffer,2):add(epsilon) - -- self._outputs is also used as a temporary buffer - self._outputs:resizeAs(self.w22):fill(1) - self.w22:cdiv(self._outputs, self.w22) - self.w:resizeAs(self.w22):copy(self.w22) - - self.buffer:cmul(input2,input2) - self.w32:sum(self.buffer,2):add(epsilon) - self.w32:cdiv(self._outputs, self.w32) - self.w:cmul(self.w32) - self.w:sqrt() - - self._outputs:cmul(self.w1,self.w) - self._outputs = self._outputs:select(2,1) - - y.eq(self._idx,y,-1) - self._outputs[self._idx] = self._outputs[self._idx]:add(-self.margin):cmax(0) - y.eq(self._idx,y,1) - self._outputs[self._idx] = self._outputs[self._idx]:mul(-1):add(1) - - self.output = self._outputs:sum() - - if self.sizeAverage then - self.output = self.output/y:size(1) - end - - return self.output -end - -function CosineEmbeddingCriterion:updateGradInput(input, y) - - local v1 = input[1] - local v2 = input[2] - local not_batch = false - - -- keep backward compatibility - if type(y) == 'number' then - self._y = self._y or v1.new(1) - self._y[1] = y - y = self._y - end - - if v1:dim() == 1 then - v1 = v1:view(1,-1) - v2 = v2:view(1,-1) - not_batch = true - end - - local gw1 = self.gradInput[1] - local gw2 = self.gradInput[2] - gw1:resizeAs(v1):copy(v2) - gw2:resizeAs(v1):copy(v1) - - self.buffer:cmul(self.w1,self.w22) - gw1:addcmul(-1,self.buffer:expandAs(v1),v1) - gw1:cmul(self.w:expandAs(v1)) - - self.buffer:cmul(self.w1,self.w32) - gw2:addcmul(-1,self.buffer:expandAs(v1),v2) - gw2:cmul(self.w:expandAs(v1)) - - -- self._idx = self._outputs <= 0 - y.le(self._idx,self._outputs,0) - self._idx = self._idx:view(-1,1):expand(gw1:size()) - gw1[self._idx] = 0 - gw2[self._idx] = 0 - - y.eq(self._idx,y,1) - self._idx = self._idx:view(-1,1):expand(gw2:size()) - gw1[self._idx] = gw1[self._idx]:mul(-1) - gw2[self._idx] = gw2[self._idx]:mul(-1) - - if self.sizeAverage then - gw1:div(y:size(1)) - gw2:div(y:size(1)) - end - - if not_batch then - self.gradInput[1]:resize(gw1:size(2)) - self.gradInput[2]:resize(gw2:size(2)) - end - - return self.gradInput -end - -function CosineEmbeddingCriterion:type(type) - self._idx = nil - parent.type(self,type) - -- comparison operators behave differently from cuda/c implementations - if type == 'torch.CudaTensor' then - self._idx = torch.CudaTensor() - else - self._idx = torch.ByteTensor() - end - return self -end diff --git a/contrib/lua-torch/nn/Criterion.lua b/contrib/lua-torch/nn/Criterion.lua deleted file mode 100644 index e48f068760..0000000000 --- a/contrib/lua-torch/nn/Criterion.lua +++ /dev/null @@ -1,64 +0,0 @@ -local Criterion = torch.class('nn.Criterion') - -function Criterion:__init() - self.gradInput = torch.Tensor() - self.output = 0 -end - -function Criterion:updateOutput(input, target) -end - -function Criterion:forward(input, target) - return self:updateOutput(input, target) -end - -function Criterion:backward(input, target) - return self:updateGradInput(input, target) -end - -function Criterion:updateGradInput(input, target) -end - -function Criterion:clone() - local f = torch.MemoryFile("rw"):binary() - f:writeObject(self) - f:seek(1) - local clone = f:readObject() - f:close() - return clone -end - -function Criterion:type(type, tensorCache) - assert(type, 'Criterion: must provide a type to convert to') - -- find all tensors and convert them - for key,param in pairs(self) do - self[key] = nn.utils.recursiveType(param, type,
tensorCache) - end - return self -end - -function Criterion:float() - return self:type('torch.FloatTensor') -end - -function Criterion:double() - return self:type('torch.DoubleTensor') -end - -function Criterion:cuda() - return self:type('torch.CudaTensor') -end - -function Criterion:cudaHalf() - return self:type('torch.CudaHalfTensor') -end - -function Criterion:cudaDouble() - return self:type('torch.CudaDoubleTensor') -end - -function Criterion:__call__(input, target) - self.output = self:forward(input, target) - self.gradInput = self:backward(input, target) - return self.output, self.gradInput -end diff --git a/contrib/lua-torch/nn/CriterionTable.lua b/contrib/lua-torch/nn/CriterionTable.lua deleted file mode 100644 index 14f67bd395..0000000000 --- a/contrib/lua-torch/nn/CriterionTable.lua +++ /dev/null @@ -1,17 +0,0 @@ -local CriterionTable, parent = torch.class('nn.CriterionTable', 'nn.Module') - -function CriterionTable:__init(criterion) - parent.__init(self) - self.criterion = criterion - self.gradInput = {criterion.gradInput} -end - -function CriterionTable:updateOutput(input) - self.output = self.criterion:updateOutput(table.unpack(input)) - return self.output -end - -function CriterionTable:updateGradInput(input, gradOutput) - self.criterion:updateGradInput(table.unpack(input)) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/CrossEntropyCriterion.lua b/contrib/lua-torch/nn/CrossEntropyCriterion.lua deleted file mode 100644 index 2f72cf87f4..0000000000 --- a/contrib/lua-torch/nn/CrossEntropyCriterion.lua +++ /dev/null @@ -1,42 +0,0 @@ -local CrossEntropyCriterion, Criterion = torch.class('nn.CrossEntropyCriterion', 'nn.Criterion') - -function CrossEntropyCriterion:__init(weights, sizeAverage) - Criterion.__init(self) - self.lsm = nn.LogSoftMax() - self.nll = nn.ClassNLLCriterion(weights, sizeAverage) - self.sizeAverage = self.nll.sizeAverage - self.oldSizeAverage = self.sizeAverage -end - -function CrossEntropyCriterion:updateOutput(input, target) - input = input:squeeze() - target = type(target) == 'number' and target or target:squeeze() - -- only propagate if value has changed to preserve old behavior - -- of setting nll.sizeAverage directly - if self.sizeAverage ~= self.oldSizeAverage then - self.nll.sizeAverage = self.sizeAverage - end - self.lsm:updateOutput(input) - self.nll:updateOutput(self.lsm.output, target) - self.output = self.nll.output - self.oldSizeAverage = self.sizeAverage - return self.output -end - -function CrossEntropyCriterion:updateGradInput(input, target) - local size = input:size() - input = input:squeeze() - target = type(target) == 'number' and target or target:squeeze() - -- only propagate if value has changed to preserve old behavior - -- of setting nll.sizeAverage directly - if self.sizeAverage ~= self.oldSizeAverage then - self.nll.sizeAverage = self.sizeAverage - end - self.nll:updateGradInput(self.lsm.output, target) - self.lsm:updateGradInput(input, self.nll.gradInput) - self.gradInput:view(self.lsm.gradInput, size) - self.oldSizeAverage = self.sizeAverage - return self.gradInput -end - -return nn.CrossEntropyCriterion diff --git a/contrib/lua-torch/nn/Decorator.lua b/contrib/lua-torch/nn/Decorator.lua deleted file mode 100644 index 05fb4db926..0000000000 --- a/contrib/lua-torch/nn/Decorator.lua +++ /dev/null @@ -1,47 +0,0 @@ -local Decorator, parent = torch.class("nn.Decorator", "nn.Container") - -function Decorator:__init(module) - parent.__init(self) - -- so that it can be handled like a Container - self.modules[1] = module -end 
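-- [editor's illustrative sketch, not part of the original diff] Usage of the
-- Criterion protocol defined above; variable names here are hypothetical:
--
--    local crit = nn.CrossEntropyCriterion()        -- LogSoftMax + ClassNLLCriterion
--    local loss = crit:forward(scores, targets)     -- dispatches to updateOutput
--    local dscores = crit:backward(scores, targets) -- dispatches to updateGradInput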
- -function Decorator:updateOutput(input) - self.output = self.modules[1]:updateOutput(input) - return self.output -end - -function Decorator:updateGradInput(input, gradOutput) - self.gradInput = self.modules[1]:updateGradInput(input, gradOutput) - return self.gradInput -end - -function Decorator:accGradParameters(input, gradOutput, scale) - self.modules[1]:accGradParameters(input, gradOutput, scale) -end - -function Decorator:accUpdateGradParameters(input, gradOutput, lr) - self.modules[1]:accUpdateGradParameters(input, gradOutput, lr) -end - -function Decorator:sharedAccUpdateGradParameters(input, gradOutput, lr) - self.modules[1]:sharedAccUpdateGradParameters(input, gradOutput, lr) -end - -function Decorator:__tostring__() - if self.modules[1].__tostring__ then - return torch.type(self) .. ' @ ' .. self.modules[1]:__tostring__() - else - return torch.type(self) .. ' @ ' .. torch.type(self.modules[1]) - end -end - --- useful for multiple-inheritance -function Decorator.decorate(class) - class.updateOutput = nn.Decorator.updateOutput - class.updateGradInput = nn.Decorator.updateGradInput - class.accGradParameters = nn.Decorator.accGradParameters - class.accUpdateGradParameters = nn.Decorator.accUpdateGradParameters - class.sharedAccUpdateGradParameters = nn.Decorator.sharedAccUpdateGradParameters - class.__tostring__ = nn.Decorator.__tostring__ -end diff --git a/contrib/lua-torch/nn/DepthConcat.lua b/contrib/lua-torch/nn/DepthConcat.lua deleted file mode 100644 index f64a90eb80..0000000000 --- a/contrib/lua-torch/nn/DepthConcat.lua +++ /dev/null @@ -1,116 +0,0 @@ ------------------------------------------------------------------------- ---[[ DepthConcat ]]-- --- Concatenates the output of Convolutions along the depth dimension --- (nOutputFrame). This is used to implement the DepthConcat layer --- of the Going deeper with convolutions paper : --- http://arxiv.org/pdf/1409.4842v1.pdf --- The normal Concat Module can't be used since the spatial dimensions --- of tensors to be concatenated may have different values. To deal with --- this, we select the largest spatial dimensions and add zero-padding --- around the smaller dimensions. 
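-- [editor's illustrative sketch, not part of the original diff] For example,
-- concatenating a 3x3 and a 5x5 convolution along the feature dimension;
-- the 5x5 branch's smaller maps are centered and zero-padded to match:
--
--    local branches = nn.DepthConcat(2)   -- dim 2 = feature planes in batch mode
--    branches:add(nn.SpatialConvolution(3, 16, 3, 3))
--    branches:add(nn.SpatialConvolution(3, 16, 5, 5))
--    -- forward on a (B, 3, H, W) batch yields 32 planes at the 3x3 output size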
------------------------------------------------------------------------- -local DepthConcat, _ = torch.class('nn.DepthConcat', 'nn.Concat') - -function DepthConcat:windowNarrow(output, currentOutput, offset) - local outputWindow = output:narrow(self.dimension, offset, currentOutput:size(self.dimension)) - for dim=1,self.outputSize:size(1) do - local currentSize = currentOutput:size(dim) - if dim ~= self.dimension and self.outputSize[dim] ~= currentSize then - -- 5x5 vs 3x3 -> start = [(5-3)/2] + 1 = 2 (1 pad each side) - -- 9x9 vs 5x5 -> start = [(9-5)/2] + 1 = 3 (2 pad each side) - -- 9x9 vs 4x4 -> start = [(9-4)/2] + 1 = 3.5 (2 pad, 3 pad) - local start = math.floor(((self.outputSize[dim] - currentSize) / 2) + 1) - outputWindow = outputWindow:narrow(dim, start, currentSize) - end - end - return outputWindow -end - -function DepthConcat:updateOutput(input) - self.outputSize = self.outputSize or torch.LongStorage() - - local outs = {} - for i=1,#self.modules do - local currentOutput = self:rethrowErrors(self.modules[i], i, 'updateOutput', input) - outs[i] = currentOutput - if i == 1 then - self.outputSize:resize(currentOutput:dim()):copy(currentOutput:size()) - else - self.outputSize[self.dimension] = self.outputSize[self.dimension] + currentOutput:size(self.dimension) - for dim=1,self.outputSize:size(1) do - if dim ~= self.dimension then - -- take the maximum size (shouldn't change anything for batch dim) - self.outputSize[dim] = math.max(self.outputSize[dim], currentOutput:size(dim)) - end - end - end - end - self.output:resize(self.outputSize):zero() --zero for padding - - local offset = 1 - for i,module in ipairs(self.modules) do - local currentOutput = outs[i] - local outputWindow = self:windowNarrow(self.output, currentOutput, offset) - outputWindow:copy(currentOutput) - offset = offset + currentOutput:size(self.dimension) - end - return self.output -end - -function DepthConcat:updateGradInput(input, gradOutput) - self.gradInput:resizeAs(input) - - local offset = 1 - for i,module in ipairs(self.modules) do - local currentOutput = module.output - local gradOutputWindow = self:windowNarrow(gradOutput, currentOutput, offset) - local currentGradInput = self:rethrowErrors(module, i, 'updateGradInput', input, gradOutputWindow) - if i==1 then - self.gradInput:copy(currentGradInput) - else - self.gradInput:add(currentGradInput) - end - offset = offset + currentOutput:size(self.dimension) - end - return self.gradInput -end - -function DepthConcat:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - local offset = 1 - for i,module in ipairs(self.modules) do - local currentOutput = module.output - local gradOutputWindow = self:windowNarrow(gradOutput, currentOutput, offset) - self:rethrowErrors(module, i, 'accGradParameters', input, gradOutputWindow, scale) - offset = offset + currentOutput:size(self.dimension) - end -end - -function DepthConcat:backward(input, gradOutput, scale) - self.gradInput:resizeAs(input) - - scale = scale or 1 - local offset = 1 - for i,module in ipairs(self.modules) do - local currentOutput = module.output - local gradOutputWindow = self:windowNarrow(gradOutput, currentOutput, offset) - local currentGradInput = self:rethrowErrors(module, i, 'backward', input, gradOutputWindow) - if i==1 then - self.gradInput:copy(currentGradInput) - else - self.gradInput:add(currentGradInput) - end - offset = offset + currentOutput:size(self.dimension) - end - return self.gradInput -end - -function DepthConcat:accUpdateGradParameters(input, gradOutput, lr) - local offset 
= 1 - for i,module in ipairs(self.modules) do - local currentOutput = module.output - local gradOutputWindow = self:windowNarrow(gradOutput, currentOutput, offset) - self:rethrowErrors(module, i, 'accUpdateGradParameters', input, gradOutputWindow, lr) - offset = offset + currentOutput:size(self.dimension) - end -end diff --git a/contrib/lua-torch/nn/DistKLDivCriterion.lua b/contrib/lua-torch/nn/DistKLDivCriterion.lua deleted file mode 100644 index bfad575671..0000000000 --- a/contrib/lua-torch/nn/DistKLDivCriterion.lua +++ /dev/null @@ -1,34 +0,0 @@ -local DistKLDivCriterion, parent = torch.class('nn.DistKLDivCriterion', 'nn.Criterion') - -function DistKLDivCriterion:__init() - parent.__init(self) - self.sizeAverage = true -end - -function DistKLDivCriterion:updateOutput(input, target) - assert(input:dim() == target:dim() and - torch.LongTensor(input:size()):eq(torch.LongTensor(target:size())):all(), - 'input and target should have the same size') - self.output_tensor = self.output_tensor or input.new(1) - input.THNN.DistKLDivCriterion_updateOutput( - input:cdata(), - target:cdata(), - self.output_tensor:cdata(), - self.sizeAverage - ) - self.output = self.output_tensor[1] - return self.output -end - -function DistKLDivCriterion:updateGradInput(input, target) - assert(input:dim() == target:dim() and - torch.LongTensor(input:size()):eq(torch.LongTensor(target:size())):all(), - 'input and target should have the same size') - input.THNN.DistKLDivCriterion_updateGradInput( - input:cdata(), - target:cdata(), - self.gradInput:cdata(), - self.sizeAverage - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/DistanceRatioCriterion.lua b/contrib/lua-torch/nn/DistanceRatioCriterion.lua deleted file mode 100644 index 6b79d06208..0000000000 --- a/contrib/lua-torch/nn/DistanceRatioCriterion.lua +++ /dev/null @@ -1,142 +0,0 @@ ---[[ - Probabilistic Criterion for Triplet Siamese Model for learning embedding. 
-   Ref: https://arxiv.org/pdf/1610.00243.pdf
-
-   loss = -log( exp(-X) / ( exp(-X) + exp(-Y) ) )
-   where
-      X : Distance between similar samples
-      Y : Distance between dissimilar samples
-
-   The loss can be broken down into the following log expansion
-
-   loss = -log( exp(-X) ) - (-log( exp(-X) + exp(-Y) ))
-        = -log( exp(-X) ) + log( exp(-X) + exp(-Y) )
-        = -(-X) + log( exp(-X) + exp(-Y) )
-        = X + log( exp(-X) + exp(-Y) )
-
-   Gradients:
-      dLoss/dX = 1 + 1 / (exp(-X) + exp(-Y)) * -1 * exp(-X)
-               = 1 - exp(-X) / (exp(-X) + exp(-Y))
-
-      dLoss/dY = 0 + 1 / (exp(-X) + exp(-Y)) * -1 * exp(-Y)
-               = -exp(-Y) / (exp(-X) + exp(-Y))
-
---]]
-
-local DistanceRatioCriterion, parent = torch.class('nn.DistanceRatioCriterion',
-                                                   'nn.Criterion')
-
-function DistanceRatioCriterion:__init(sizeAverage)
-   parent.__init(self)
-   if sizeAverage ~= nil then
-      self.sizeAverage = sizeAverage
-   else
-      self.sizeAverage = true
-   end
-end
-
--- Forward
---[[
--- X : Distance between similar samples
--- Y : Distance between dissimilar samples
-   loss = -log( exp(-X) ) - (-log( exp(-X) + exp(-Y) ))
-        = -log( exp(-X) ) + log( exp(-X) + exp(-Y) )
-        = -(-X) + log( exp(-X) + exp(-Y) )
-        = X + log( exp(-X) + exp(-Y) )
---]]
-function DistanceRatioCriterion:updateOutput(input)
-   assert(#input == 2, "Invalid number of inputs")
-
-   local X = input[1]
-   local Y = input[2]
-
-   assert(X:nElement() == Y:nElement(), "Number of distances don't match.")
-   assert(X:size(1) == Y:size(1), "Invalid distances' size.")
-
-   -- Compute exp(-X) and exp(-Y)
-   self._expMinusX = self._expMinusX or X.new()
-   self._expMinusY = self._expMinusY or Y.new()
-
-   -- Compute ( exp(-X) + exp(-Y) )
-   self._expMinusX:resizeAs(X):copy(X):mul(-1):exp()
-   self._expMinusY:resizeAs(Y):copy(Y):mul(-1):exp()
-
-   self._sumExpMinusXY = self._sumExpMinusXY or X.new()
-   self._sumExpMinusXY:resizeAs(self._expMinusX):copy(self._expMinusX)
-                      :add(self._expMinusY)
-
-   -- Compute log( exp(-X) + exp(-Y) )
-   self._logSumExpMinusXY = self._logSumExpMinusXY or self._sumExpMinusXY.new()
-   self._logSumExpMinusXY:resizeAs(self._sumExpMinusXY)
-                         :copy(self._sumExpMinusXY):log()
-
-   -- Compute X + log( exp(-X) + exp(-Y) )
-   self.loss = self.loss or self._logSumExpMinusXY.new()
-   self.loss:resizeAs(X):copy(X):add(self._logSumExpMinusXY)
-
-   if self.sizeAverage then
-      return self.loss:sum()/X:size(1)
-   else
-      return self.loss:sum()
-   end
-end
-
--- Backward
---[[
--- X : Distance between similar samples
--- Y : Distance between dissimilar samples
-
-   Gradients:
-      dLoss/dX = 1 + 1 / (exp(-X) + exp(-Y)) * -1 * exp(-X)
-               = 1 - exp(-X) / (exp(-X) + exp(-Y))
-
-      dLoss/dY = 0 + 1 / (exp(-X) + exp(-Y)) * -1 * exp(-Y)
-               = -exp(-Y) / (exp(-X) + exp(-Y))
-
---]]
-function DistanceRatioCriterion:updateGradInput(input)
-   assert(#input == 2, "Invalid number of inputs")
-   local X = input[1]
-   local Y = input[2]
-   assert(X:nElement() == Y:nElement(), "Number of distances don't match.")
-   assert(X:size(1) == Y:size(1), "Invalid distances' size.")
-
-   -- dLoss/dX
-   -- -exp(-X)
-   self.dX = self.dX or X.new()
-   self.dX:resizeAs(self._expMinusX):copy(self._expMinusX):mul(-1)
-
-   -- -exp(-X) / (exp(-X) + exp(-Y))
-   self.dX:cdiv(self._sumExpMinusXY)
-
-   -- 1 - exp(-X) / (exp(-X) + exp(-Y))
-   self.dX:add(1)
-
-   -- dLoss/dY
-   -- -exp(-Y)
-   self.dY = self.dY or Y.new()
-   self.dY:resizeAs(self._expMinusY):copy(self._expMinusY):mul(-1)
-
-   -- -exp(-Y) / (exp(-X) + exp(-Y))
-   self.dY:cdiv(self._sumExpMinusXY)
-
-   if self.sizeAverage then
-      self.dX:div(X:size(1))
-      self.dY:div(X:size(1))
-   end
-
-   return {self.dX, self.dY}
-end
-
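-- [editor's illustrative sketch, not part of the original diff] Typical use
-- with a triplet model; X and Y are hypothetical 1D distance tensors:
--
--    local crit = nn.DistanceRatioCriterion(true)  -- sizeAverage = true
--    -- X: anchor-positive distances, Y: anchor-negative distances
--    local loss = crit:forward({X, Y})
--    local grads = crit:backward({X, Y})           -- {dLoss/dX, dLoss/dY}
--
-- Note there is no separate target argument: the "label" is implicit in
-- which distance tensor is passed first.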
-function DistanceRatioCriterion:type(type, tensorCache) - if type then - self._expMinusX = nil - self._expMinusY = nil - self._sumExpMinusXY = nil - self._logSumExpMinusXY = nil - self.loss = nil - self.dX = nil - self.dY = nil - end - return parent.type(self, type, tensorCache) -end diff --git a/contrib/lua-torch/nn/DontCast.lua b/contrib/lua-torch/nn/DontCast.lua deleted file mode 100644 index b89f5436b9..0000000000 --- a/contrib/lua-torch/nn/DontCast.lua +++ /dev/null @@ -1,124 +0,0 @@ -local DontCast, parent = torch.class("nn.DontCast", "nn.Decorator") - --- utility functions - -local function recursiveTypeCopy(dst, src, type_str) - if torch.type(src) == 'table' then - dst = (torch.type(dst) == 'table') and dst or {} - for k, v in pairs(src) do - dst[k] = recursiveTypeCopy(dst[k], v, type_str) - end - elseif torch.isTensor(src) then - dst = (torch.type(dst) == type_str) and dst or torch.getmetatable(type_str).new() - dst:resize(src:size()) - if src:nElement() > 0 then - dst:copy(src) - end - end - return dst -end - -local function tableTensorType(src) - if type(src) == 'table' then - local type_str, found - for k,v in pairs(src) do - type_str, found = tableTensorType(v) - if found then - return type_str, true - end - end - return type_str, found - else - return torch.type(src), torch.isTensor(src) - end -end - --- DontCast methods and constructor - -function DontCast:__init(module, castin, castout, moduleType) - parent.__init(self, module) - self.castin = castin - self.castout = (castout == nil) and castin or castout - self.moduleType = moduleType - if (self.castin or self.castout) and not self.moduleType then - local moduleType, found = tableTensorType(module.output) - if found then - self.moduleType = moduleType - else - moduleType, found = tableTensorType(module:parameters()) - if found then - self.moduleType = moduleType - else - error"Cannot extrapolate moduleType. 
Provide constructor argument 4" - end - end - end -end - -function DontCast:updateOutput(input) - if self.castin and tableTensorType(input) ~= self.moduleType then - self._input = recursiveTypeCopy(self._input, input, self.moduleType) - input = self._input - end - - local output = self.modules[1]:updateOutput(input) - - if self.castout then - self.output = recursiveTypeCopy(self.output, output, tableTensorType(self.output)) - else - self.output = output - end - return self.output -end - -function DontCast:updateGradInput(input, gradOutput) - if self.castin and tableTensorType(input) ~= self.moduleType then - input = self._input - end - if self.castout and tableTensorType(gradOutput) ~= self.moduleType then - self._gradOutput = recursiveTypeCopy(self._gradOutput, gradOutput, self.moduleType) - gradOutput = self._gradOutput - end - - local gradInput = self.modules[1]:updateGradInput(input, gradOutput) - - if self.castin then - self.gradInput = recursiveTypeCopy(self.gradInput, gradInput, tableTensorType(self.gradInput)) - else - self.gradInput = gradInput - end - return self.gradInput -end - -function DontCast:accGradParameters(input, gradOutput, scale) - if self.castin and tableTensorType(input) ~= self.moduleType then - input = self._input - end - if self.castout and tableTensorType(gradOutput) ~= self.moduleType then - gradOutput = self._gradOutput - end - - self.modules[1]:accGradParameters(input, gradOutput, scale) -end - -function DontCast:accUpdateGradParameters(input, gradOutput, lr) - if self.castin and tableTensorType(input) ~= self.moduleType then - input = self._input - end - if self.castout and tableTensorType(gradOutput) ~= self.moduleType then - gradOutput = self._gradOutput - end - - self.modules[1]:accUpdateGradParameters(input, gradOutput, lr) -end - --- dont cast (the essence thereof) -function DontCast:type(type) - if self.castout and tableTensorType(self.output) ~= type then - self.output = recursiveTypeCopy(nil, self.output, type) - end - if self.castin and tableTensorType(self.gradInput) ~= type then - self.gradInput = recursiveTypeCopy(nil, self.gradInput, type) - end - return self -end diff --git a/contrib/lua-torch/nn/DotProduct.lua b/contrib/lua-torch/nn/DotProduct.lua deleted file mode 100644 index ccd347e6bd..0000000000 --- a/contrib/lua-torch/nn/DotProduct.lua +++ /dev/null @@ -1,61 +0,0 @@ -local DotProduct, parent = torch.class('nn.DotProduct', 'nn.Module') - -function DotProduct:__init() - parent.__init(self) - self.gradInput = {torch.Tensor(), torch.Tensor()} -end - -function DotProduct:updateOutput(input) - local input1, input2 = input[1], input[2] - if input1:dim() == 1 then - -- convert non batch input to batch input - input1 = input1:view(1,-1) - input2 = input2:view(1,-1) - end - if not self.buffer then - self.buffer = input1.new() - end - self.buffer:cmul(input1, input2) - self.output:sum(self.buffer, 2) - self.output:resize(input1:size(1)) - return self.output -end - -function DotProduct:updateGradInput(input, gradOutput) - local v1 = input[1] - local v2 = input[2] - local not_batch = false - - if #self.gradInput ~= 2 then - self.gradInput[1] = self.gradInput[1] or input[1].new() - self.gradInput[2] = self.gradInput[2] or input[2].new() - end - - if v1:dim() == 1 then - v1 = v1:view(1,-1) - v2 = v2:view(1,-1) - not_batch = true - end - - local gw1 = self.gradInput[1] - local gw2 = self.gradInput[2] - gw1:resizeAs(v1):copy(v2) - gw2:resizeAs(v2):copy(v1) - - local go = gradOutput:view(-1,1):expandAs(v1) - gw1:cmul(go) - gw2:cmul(go) - - if not_batch 
then - -- unbatch gradInput - self.gradInput[1]:set(gw1:select(1,1)) - self.gradInput[2]:set(gw2:select(1,1)) - end - - return self.gradInput -end - -function DotProduct:clearState() - if self.buffer then self.buffer:set() end - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/Dropout.lua b/contrib/lua-torch/nn/Dropout.lua deleted file mode 100644 index 15f2f46992..0000000000 --- a/contrib/lua-torch/nn/Dropout.lua +++ /dev/null @@ -1,70 +0,0 @@ -local Dropout, Parent = torch.class('nn.Dropout', 'nn.Module') - -function Dropout:__init(p,v1,inplace,stochasticInference) - Parent.__init(self) - self.p = p or 0.5 - self.train = true - self.inplace = inplace - self.stochastic_inference = stochasticInference or false - -- version 2 scales output during training instead of evaluation - self.v2 = not v1 - if self.p >= 1 or self.p < 0 then - error(' illegal percentage, must be 0 <= p < 1') - end - self.noise = torch.Tensor() -end - -function Dropout:updateOutput(input) - if self.inplace then - self.output:set(input) - else - self.output:resizeAs(input):copy(input) - end - if self.p > 0 then - if self.train or self.stochastic_inference then - self.noise:resizeAs(input) - self.noise:bernoulli(1-self.p) - if self.v2 then - self.noise:div(1-self.p) - end - self.output:cmul(self.noise) - elseif not self.v2 then - self.output:mul(1-self.p) - end - end - return self.output -end - -function Dropout:updateGradInput(input, gradOutput) - if self.inplace then - self.gradInput:set(gradOutput) - else - self.gradInput:resizeAs(gradOutput):copy(gradOutput) - end - if self.train then - if self.p > 0 then - self.gradInput:cmul(self.noise) -- simply mask the gradients with the noise vector - end - else - if not self.v2 and self.p > 0 then - self.gradInput:mul(1-self.p) - end - end - return self.gradInput -end - -function Dropout:setp(p) - self.p = p -end - -function Dropout:__tostring__() - return string.format('%s(%f)', torch.type(self), self.p) -end - - -function Dropout:clearState() - if self.noise then - self.noise:set() - end - return Parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/ELU.lua b/contrib/lua-torch/nn/ELU.lua deleted file mode 100644 index 48a6caa2cc..0000000000 --- a/contrib/lua-torch/nn/ELU.lua +++ /dev/null @@ -1,45 +0,0 @@ -local ELU, parent = torch.class('nn.ELU', 'nn.Module') - ---[[ - Djork-Arné Clevert, Thomas Unterthiner, Sepp Hochreiter - Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) - http://arxiv.org/pdf/1511.07289.pdf ---]] - -function ELU:__init(alpha, inplace) - parent.__init(self) - self.alpha = alpha or 1 - assert(type(self.alpha) == 'number') - self.inplace = inplace or false - assert(type(self.inplace) == 'boolean') -end - -function ELU:updateOutput(input) - local inplace = self.inplace or false - - input.THNN.ELU_updateOutput( - input:cdata(), - self.output:cdata(), - self.alpha, - inplace - ) - return self.output -end - -function ELU:updateGradInput(input, gradOutput) - local inplace = self.inplace or false - - input.THNN.ELU_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.output:cdata(), - self.alpha, - inplace - ) - return self.gradInput -end - -function ELU:__tostring__() - return string.format('%s (alpha:%f)', torch.type(self), self.alpha) -end diff --git a/contrib/lua-torch/nn/ErrorMessages.lua b/contrib/lua-torch/nn/ErrorMessages.lua deleted file mode 100644 index a5cbed053d..0000000000 --- a/contrib/lua-torch/nn/ErrorMessages.lua +++ /dev/null @@ -1,19 +0,0 @@ - -local 
mt = { - __index = function(table, key) - error("nn."..key.." is only supported for Float or Double Tensors.") - end -} - -local tensors = { - torch.ByteTensor, - torch.CharTensor, - torch.ShortTensor, - torch.IntTensor, - torch.LongTensor, -} - -for _, t in ipairs(tensors) do - t.nn = {} - setmetatable(t.nn, mt) -end diff --git a/contrib/lua-torch/nn/Euclidean.lua b/contrib/lua-torch/nn/Euclidean.lua deleted file mode 100644 index 509feff508..0000000000 --- a/contrib/lua-torch/nn/Euclidean.lua +++ /dev/null @@ -1,197 +0,0 @@ -local Euclidean, parent = torch.class('nn.Euclidean', 'nn.Module') - -function Euclidean:__init(inputSize,outputSize) - parent.__init(self) - - self.weight = torch.Tensor(inputSize,outputSize) - self.gradWeight = torch.Tensor(inputSize,outputSize) - - -- state - self.gradInput:resize(inputSize) - self.output:resize(outputSize) - - self.fastBackward = true - - self:reset() -end - -function Euclidean:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1./math.sqrt(self.weight:size(1)) - end - if nn.oldSeed then - for i=1,self.weight:size(2) do - self.weight:select(2, i):apply(function() - return torch.uniform(-stdv, stdv) - end) - end - else - self.weight:uniform(-stdv, stdv) - end -end - -local function view(res, src, ...) - local args = {...} - if src:isContiguous() then - res:view(src, table.unpack(args)) - else - res:reshape(src, table.unpack(args)) - end -end - -function Euclidean:updateOutput(input) - -- lazy initialize buffers - self._input = self._input or input.new() - self._weight = self._weight or self.weight.new() - self._expand = self._expand or self.output.new() - self._expand2 = self._expand2 or self.output.new() - self._repeat = self._repeat or self.output.new() - self._repeat2 = self._repeat2 or self.output.new() - - local inputSize, outputSize = self.weight:size(1), self.weight:size(2) - - -- y_j = || w_j - x || = || x - w_j || - if input:dim() == 1 then - view(self._input, input, inputSize, 1) - self._expand:expandAs(self._input, self.weight) - self._repeat:resizeAs(self._expand):copy(self._expand) - self._repeat:add(-1, self.weight) - self.output:norm(self._repeat, 2, 1) - self.output:resize(outputSize) - elseif input:dim() == 2 then - local batchSize = input:size(1) - - view(self._input, input, batchSize, inputSize, 1) - self._expand:expand(self._input, batchSize, inputSize, outputSize) - -- make the expanded tensor contiguous (requires lots of memory) - self._repeat:resizeAs(self._expand):copy(self._expand) - - self._weight:view(self.weight, 1, inputSize, outputSize) - self._expand2:expandAs(self._weight, self._repeat) - - if torch.type(input) == 'torch.CudaTensor' then - -- requires lots of memory, but minimizes cudaMallocs and loops - self._repeat2:resizeAs(self._expand2):copy(self._expand2) - self._repeat:add(-1, self._repeat2) - else - self._repeat:add(-1, self._expand2) - end - - self.output:norm(self._repeat, 2, 2) - self.output:resize(batchSize, outputSize) - else - error"1D or 2D input expected" - end - - return self.output -end - -function Euclidean:updateGradInput(input, gradOutput) - if not self.gradInput then - return - end - - self._div = self._div or input.new() - self._output = self._output or self.output.new() - self._gradOutput = self._gradOutput or input.new() - self._expand3 = self._expand3 or input.new() - - if not self.fastBackward then - self:updateOutput(input) - end - - local inputSize, outputSize = self.weight:size(1), self.weight:size(2) - - --[[ - dy_j -2 * (w_j - x) x - w_j - ---- = --------------- = 
------- - dx 2 || w_j - x || y_j - --]] - - -- to prevent div by zero (NaN) bugs - self._output:resizeAs(self.output):copy(self.output):add(0.0000001) - view(self._gradOutput, gradOutput, gradOutput:size()) - self._div:cdiv(gradOutput, self._output) - if input:dim() == 1 then - self._div:resize(1, outputSize) - self._expand3:expandAs(self._div, self.weight) - - if torch.type(input) == 'torch.CudaTensor' then - self._repeat2:resizeAs(self._expand3):copy(self._expand3) - self._repeat2:cmul(self._repeat) - else - self._repeat2:cmul(self._repeat, self._expand3) - end - - self.gradInput:sum(self._repeat2, 2) - self.gradInput:resizeAs(input) - elseif input:dim() == 2 then - local batchSize = input:size(1) - - self._div:resize(batchSize, 1, outputSize) - self._expand3:expand(self._div, batchSize, inputSize, outputSize) - - if torch.type(input) == 'torch.CudaTensor' then - self._repeat2:resizeAs(self._expand3):copy(self._expand3) - self._repeat2:cmul(self._repeat) - else - self._repeat2:cmul(self._repeat, self._expand3) - end - - self.gradInput:sum(self._repeat2, 3) - self.gradInput:resizeAs(input) - else - error"1D or 2D input expected" - end - - return self.gradInput -end - -function Euclidean:accGradParameters(input, gradOutput, scale) - local inputSize, outputSize = self.weight:size(1), self.weight:size(2) - scale = scale or 1 - - --[[ - dy_j 2 * (w_j - x) w_j - x - ---- = --------------- = ------- - dw_j 2 || w_j - x || y_j - --]] - -- assumes a preceding call to updateGradInput - if input:dim() == 1 then - self.gradWeight:add(-scale, self._repeat2) - elseif input:dim() == 2 then - self._sum = self._sum or input.new() - self._sum:sum(self._repeat2, 1) - self._sum:resize(inputSize, outputSize) - self.gradWeight:add(-scale, self._sum) - else - error"1D or 2D input expected" - end -end - -function Euclidean:type(type, tensorCache) - if type then - -- prevent premature memory allocations - self:clearState() - end - return parent.type(self, type, tensorCache) -end - -function Euclidean:clearState() - nn.utils.clear(self, { - '_input', - '_output', - '_gradOutput', - '_weight', - '_div', - '_sum', - '_expand', - '_expand2', - '_expand3', - '_repeat', - '_repeat2', - }) - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/Exp.lua b/contrib/lua-torch/nn/Exp.lua deleted file mode 100644 index f415690269..0000000000 --- a/contrib/lua-torch/nn/Exp.lua +++ /dev/null @@ -1,9 +0,0 @@ -local Exp = torch.class('nn.Exp', 'nn.Module') - -function Exp:updateOutput(input) - return self.output:exp(input) -end - -function Exp:updateGradInput(input, gradOutput) - return self.gradInput:cmul(self.output, gradOutput) -end diff --git a/contrib/lua-torch/nn/FlattenTable.lua b/contrib/lua-torch/nn/FlattenTable.lua deleted file mode 100644 index 1c182557c5..0000000000 --- a/contrib/lua-torch/nn/FlattenTable.lua +++ /dev/null @@ -1,106 +0,0 @@ -local FlattenTable, parent = torch.class('nn.FlattenTable', 'nn.Module') - -function FlattenTable:__init() - parent.__init(self) - - self.output = {} - self.input_map = {} - self.gradInput = {} -end - --- Recursive function to flatten a table (output is a table) -local function flatten(output, input) - local input_map -- has the same structure as input, but stores the - -- indices to the corresponding output - if type(input) == 'table' then - input_map = {} - -- forward DFS order - for i = 1, #input do - input_map[#input_map+1] = flatten(output, input[i]) - end - else - input_map = #output + 1 - output[input_map] = input -- append the tensor - end - return 
input_map
-end
-
--- Recursive function to check if we need to rebuild the output table
-local function checkMapping(output, input, input_map)
-   if input_map == nil or output == nil or input == nil then
-      return false
-   end
-   if type(input) == 'table' then
-      if type(input_map) ~= 'table' then
-         return false
-      end
-      if #input ~= #input_map then
-         return false
-      end
-      -- forward DFS order
-      for i = 1, #input do
-         local ok = checkMapping(output, input[i], input_map[i])
-         if not ok then
-            return false
-         end
-      end
-      return true
-   else
-      if type(input_map) ~= 'number' then
-         return false
-      end
-      return output[input_map] == input
-   end
-end
-
--- During BPROP we have to build a gradInput with the same shape as the
--- input. This is a recursive function to build up a gradInput
-local function inverseFlatten(gradOutput, input_map)
-   if type(input_map) == 'table' then
-      local gradInput = {}
-      for i = 1, #input_map do
-         gradInput[#gradInput + 1] = inverseFlatten(gradOutput, input_map[i])
-      end
-      return gradInput
-   else
-      return gradOutput[input_map]
-   end
-end
-
-function FlattenTable:updateOutput(input)
-   assert(type(input) == 'table', 'input must be a table')
-   -- to avoid rebuilding the flattened table every updateOutput call
-   -- we will do a DFS pass over the existing output table and the inputs to
-   -- see if it needs to be rebuilt.
-   if not checkMapping(self.output, input, self.input_map) then
-      self.output = {}
-      self.input_map = flatten(self.output, input)
-   end
-   return self.output
-end
-
-function FlattenTable:updateGradInput(input, gradOutput)
-   assert(type(input) == 'table', 'input must be a table')
-   assert(type(gradOutput) == 'table', 'gradOutput must be a table')
-   -- If the input changes between the updateOutput and updateGradInput call,
-   -- then we may have to rebuild the input_map! However, let's assume that
-   -- the input_map is valid and that forward has already been called.
-
-   -- However, we should check that the gradInput is valid:
-   if not checkMapping(gradOutput, self.gradInput, self.input_map) then
-      self.gradInput = inverseFlatten(gradOutput, self.input_map)
-   end
-
-   return self.gradInput
-end
-
-function FlattenTable:type(type, tensorCache)
-   -- This function just stores references so we don't need to do any type
-   -- conversions. Just force the tables to be empty.
-   self:clearState()
-end
-
-function FlattenTable:clearState()
-   self.input_map = {}
-   return parent.clearState(self)
-end
diff --git a/contrib/lua-torch/nn/GPU.lua b/contrib/lua-torch/nn/GPU.lua
deleted file mode 100644
index 758618d8b2..0000000000
--- a/contrib/lua-torch/nn/GPU.lua
+++ /dev/null
@@ -1,273 +0,0 @@
-------------------------------------------------------------------------
---[[ GPU ]]--
--- Decorates a module such that its parameters are
-- hosted on a specified GPU device.
-- The operations are also executed on that device.
-- Arguments input and gradOutput are converted to the specified device
-- before being fed to the decorated module.
-- Returned output is on the specified outdevice (defaults to device).
-- Returned gradInput is allocated on the same device as the input.
-- The unit test is located in cunn.
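-- [editor's illustrative sketch, not part of the original diff] Typical use,
-- assuming cutorch is installed; sizes and the device id are hypothetical:
--
--    local inner = nn.Sequential():add(nn.Linear(10000, 1000))
--    local net = nn.GPU(inner, 2)        -- parameters live on device 2
--    net:cuda()                          -- convert once; stays on device 2
--    local output = net:forward(input)   -- input is copied to device 2 first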
------------------------------------------------------------------------- -local GPU, parent = torch.class("nn.GPU", "nn.Container") - -function GPU:__init(module, device, outdevice) - parent.__init(self) - assert(torch.type(device) == 'number') - self.device = device - self.outdevice = outdevice or device - - assert(torch.isTypeOf(module, 'nn.Module')) - self.modules[1] = module - - if module:type():find('torch%.Cuda.*Tensor') then - self:type(module:type()) - end -end - -function GPU.recursiveModuleDevice(obj, device) - if type(obj) == 'table' and not torch.isTypeOf(obj, 'nn.GPU') and not obj.__noGPU__ then - for k,v in pairs(obj) do - obj[k] = GPU.recursiveModuleDevice(v, device) - end - elseif torch.type(obj):match('torch.Cuda.*Tensor') then - if obj:getDevice() ~= device then - obj = obj:clone() -- this will reallocate it to device - local newdevice = obj:getDevice() - -- when nElement() == 0 newdevice is 0 - assert(newdevice == device or newdevice == 0) - end - end - assert(obj ~= nil) - return obj -end - --- set the device of the decorated module -function GPU:setDevice(device) - self.device = device or self.device - - assert(self.modules[1]) - self.modules[1] = cutorch.withDevice(self.device, function() - return self.recursiveModuleDevice(self.modules[1], self.device) - end) - return self -end - --- when proto is a device number, returns a dst that has device device for each element in src --- otherwise, if proto is a table/tensor, makes sure dst is a identical to src, yet on the same device as proto -function GPU.recursiveSetDevice(dst, src, proto) - local device, prototable - if torch.isTensor(proto) then - device = proto:getDevice() - elseif torch.type(proto) == 'number' then - device = proto - elseif torch.type(proto) == 'table' then - prototable = true - else - error"Expecting number, table or tensor for arg 3 (proto)" - end - if torch.type(src) == 'table' then - dst = torch.type(dst) == 'table' and dst or {} - for k,v in ipairs(src) do - dst[k] = GPU.recursiveSetDevice(dst[k], v, prototable and proto[k] or device) - end - for k=#src+1,#dst do - dst[k] = nil - end - elseif torch.type(src):match('torch.Cuda.*Tensor') and src:getDevice() ~= device and src:getDevice() ~= 0 then - if not (torch.type(dst):match('torch.Cuda.*Tensor') and dst:getDevice() == device) then - dst = src.new() - end - cutorch.withDevice(device, function() dst:resizeAs(src):copy(src) end) - else - dst = src - end - return dst -end - -function GPU:updateOutput(input) - if self._type:find('torch%.Cuda.*Tensor') then - self._input = self.recursiveSetDevice(self._input, input, self.device) - - local output = cutorch.withDevice(self.device, function() - return self.modules[1]:updateOutput(self._input) - end) - - if self.device ~= self.outdevice then - self.output = self.recursiveSetDevice(self.output, output, self.outdevice) - else - self.output = output - end - else - self.output = self.modules[1]:updateOutput(input) - end - - return self.output -end - -function GPU:updateGradInput(input, gradOutput) - if self._type:find('torch%.Cuda.*Tensor') then - self._gradOutput = self.recursiveSetDevice(self._gradOutput, gradOutput, self.device) - - local gradInput = cutorch.withDevice(self.device, function() - return self.modules[1]:updateGradInput(self._input, self._gradOutput) - end) - - self.gradInput = self.recursiveSetDevice(self.gradInput, gradInput, input) - else - self.gradInput = self.modules[1]:updateGradInput(input, gradOutput) - end - - return self.gradInput -end - -function GPU:accGradParameters(input, 
gradOutput, scale) - if self._type:find('torch%.Cuda.*Tensor') then - cutorch.withDevice(self.device, function() - self.modules[1]:accGradParameters(self._input, self._gradOutput, scale) - end) - else - self.modules[1]:accGradParameters(input, gradOutput, scale) - end -end - -function GPU:apply(callback) - if self._type:find('torch%.Cuda.*Tensor') then - cutorch.withDevice(self.device, function() parent.apply(self, callback) end) - else - parent.apply(self, callback) - end -end - -function GPU:type(type, typecache) - if type and type:find('torch%.Cuda.*Tensor') then - cutorch.withDevice(self.device, function() parent.type(self, type, typecache) end) - self:setDevice() - else - self.output = nil - self.gradInput = nil - self._input = nil - self._gradOutput = nil - parent.type(self, type, typecache) - end - return self -end - -function GPU:clearState() - nn.utils.clear(self, 'output', 'gradInput') - self._input = nil - self._gradOutput = nil - if self._type:find('torch%.Cuda.*Tensor') then - cutorch.withDevice(self.device, function() parent.clearState(self) end) - else - parent.clearState(self) - end -end - -function GPU:zeroGradParameters() - if self._type:find('torch%.Cuda.*Tensor') then - cutorch.withDevice(self.device, function() parent.zeroGradParameters(self) end) - else - parent.zeroGradParameters(self) - end -end - -function GPU:updateParameters(lr) - if self._type:find('torch%.Cuda.*Tensor') then - cutorch.withDevice(self.device, function() parent.updateParameters(self, lr) end) - else - parent.updateParameters(self, lr) - end -end - -function GPU:training() - if self._type:find('torch%.Cuda.*Tensor') then - cutorch.withDevice(self.device, function() parent.training(self) end) - else - parent.training(self) - end -end - -function GPU:evaluate() - if self._type:find('torch%.Cuda.*Tensor') then - cutorch.withDevice(self.device, function() parent.evaluate(self) end) - else - parent.evaluate(self) - end -end - -function GPU:share(mlp, ...) - local args = {...} - if self._type:find('torch%.Cuda.*Tensor') then - cutorch.withDevice(self.device, function() parent.share(self, mlp, unpack(args)) end) - else - parent.share(self, mlp, unpack(args)) - end - return self -end - -function GPU:reset(...) - local args = {...} - if self._type:find('torch%.Cuda.*Tensor') then - cutorch.withDevice(self.device, function() parent.reset(self, unpack(args)) end) - else - parent.reset(self, unpack(args)) - end - return self -end - -function GPU:clone(...) - local args = {...} - if self._type:find('torch%.Cuda.*Tensor') then - return cutorch.withDevice(self.device, function() parent.clone(self, unpack(args)) end) - else - return parent.clone(self, unpack(args)) - end -end - -function GPU:write(file) - -- Write all values in the object as a table. - local object = {} - for k, v in pairs(self) do - object[k] = v - end - local header = {self._type, self.device} - file:writeObject(header) - file:writeObject(object) -end - -function GPU:read(file) - local header = file:readObject() - local object - if header[1] and header[1]:find('torch%.Cuda.*Tensor') then - local device = header[2] - if device > cutorch.getDeviceCount() then - print"Warning : model was saved with more devices than available on current host." 
- print"Attempting to load module onto device 1" - device = 1 - end - object = cutorch.withDevice(device, function() return file:readObject() end) - else - object = file:readObject() - end - - for k, v in pairs(object) do - self[k] = v - end -end - -function GPU:__tostring__() - if self.modules[1].__tostring__ then - return torch.type(self) .. '(' .. self.device ..') @ ' .. self.modules[1]:__tostring__() - else - return torch.type(self) .. '(' .. self.device ..') @ ' .. torch.type(self.modules[1]) - end -end - -function GPU:accUpdateGradParameters(input, gradOutput, lr) - error("Not Implemented for "..torch.type(self)) -end - -function GPU:sharedAccUpdateGradParameters(input, gradOutput, lr) - error("Not Implemented for "..torch.type(self)) -end diff --git a/contrib/lua-torch/nn/GatedLinearUnit.lua b/contrib/lua-torch/nn/GatedLinearUnit.lua deleted file mode 100644 index 5273abfd44..0000000000 --- a/contrib/lua-torch/nn/GatedLinearUnit.lua +++ /dev/null @@ -1,27 +0,0 @@ -local GatedLinearUnit, parent = torch.class('nn.GatedLinearUnit', 'nn.Module') - -function GatedLinearUnit:__init(dim) - parent.__init(self) - self.dim = dim -end - -function GatedLinearUnit:updateOutput(input) - local dim = self.dim or input:dim() - input.THNN.GatedLinear_updateOutput( - input:cdata(), - self.output:cdata(), - dim - ) - return self.output -end - -function GatedLinearUnit:updateGradInput(input, gradOutput) - local dim = self.dim or input:dim() - input.THNN.GatedLinear_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - dim - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/GradientReversal.lua b/contrib/lua-torch/nn/GradientReversal.lua deleted file mode 100644 index c08b1dfb0a..0000000000 --- a/contrib/lua-torch/nn/GradientReversal.lua +++ /dev/null @@ -1,32 +0,0 @@ -local GradientReversal, parent = torch.class('nn.GradientReversal', 'nn.Module') - -GradientReversal.__version = 2 - -function GradientReversal:__init(lambda) - lambda = lambda or 1 - parent.__init(self) - self.lambda = lambda -end - -function GradientReversal:setLambda(lambda) - self.lambda = lambda -end - -function GradientReversal:updateOutput(input) - self.output:set(input) - return self.output -end - -function GradientReversal:updateGradInput(input, gradOutput) - self.gradInput:resizeAs(gradOutput) - self.gradInput:copy(gradOutput) - self.gradInput:mul(-self.lambda) - return self.gradInput -end - -function GradientReversal:read(file, version) - parent.read(self, file) - if version < 2 then - self.lambda = 1 - end -end diff --git a/contrib/lua-torch/nn/HardShrink.lua b/contrib/lua-torch/nn/HardShrink.lua deleted file mode 100644 index 85ff5909cb..0000000000 --- a/contrib/lua-torch/nn/HardShrink.lua +++ /dev/null @@ -1,25 +0,0 @@ -local HardShrink, parent = torch.class('nn.HardShrink', 'nn.Module') - -function HardShrink:__init(lam) - parent.__init(self) - self.lambda = lam or 0.5 -end - -function HardShrink:updateOutput(input) - input.THNN.HardShrink_updateOutput( - input:cdata(), - self.output:cdata(), - self.lambda - ) - return self.output -end - -function HardShrink:updateGradInput(input, gradOutput) - input.THNN.HardShrink_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.lambda - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/HardTanh.lua b/contrib/lua-torch/nn/HardTanh.lua deleted file mode 100644 index 07cfc62553..0000000000 --- a/contrib/lua-torch/nn/HardTanh.lua +++ /dev/null @@ -1,37 +0,0 @@ -local HardTanh, parent = 
torch.class('nn.HardTanh', 'nn.Module') - -function HardTanh:__init(min_value, max_value, inplace) - parent.__init(self) - self.min_val = min_value or -1 - self.max_val = max_value or 1 - self.inplace = inplace or false - if (inplace and type(inplace) ~= 'boolean') then - error('in-place flag must be boolean') - end - assert(self.max_val>self.min_val, 'max_value must be larger than min_value') -end - -function HardTanh:updateOutput(input) - self.min_val = self.min_val or -1 - self.max_val = self.max_val or 1 - input.THNN.HardTanh_updateOutput( - input:cdata(), - self.output:cdata(), - self.min_val, - self.max_val, - self.inplace or false - ) - return self.output -end - -function HardTanh:updateGradInput(input, gradOutput) - input.THNN.HardTanh_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.min_val, - self.max_val, - self.inplace or false - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/HingeEmbeddingCriterion.lua b/contrib/lua-torch/nn/HingeEmbeddingCriterion.lua deleted file mode 100644 index 13ad00f198..0000000000 --- a/contrib/lua-torch/nn/HingeEmbeddingCriterion.lua +++ /dev/null @@ -1,43 +0,0 @@ -local HingeEmbeddingCriterion, parent = torch.class('nn.HingeEmbeddingCriterion', 'nn.Criterion') - -function HingeEmbeddingCriterion:__init(margin) - parent.__init(self) - self.margin = margin or 1 - self.sizeAverage = true -end - -function HingeEmbeddingCriterion:updateOutput(input,y) - self.buffer = self.buffer or input.new() - if not torch.isTensor(y) then - self.ty = self.ty or input.new():resize(1) - self.ty[1]=y - y=self.ty - end - - self.buffer:resizeAs(input):copy(input) - self.buffer[torch.eq(y, -1)] = 0 - self.output = self.buffer:sum() - - self.buffer:fill(self.margin):add(-1, input) - self.buffer:cmax(0) - self.buffer[torch.eq(y, 1)] = 0 - self.output = self.output + self.buffer:sum() - - if (self.sizeAverage == nil or self.sizeAverage == true) then - self.output = self.output / input:nElement() - end - - return self.output -end - -function HingeEmbeddingCriterion:updateGradInput(input, y) - if not torch.isTensor(y) then self.ty[1]=y; y=self.ty end - self.gradInput:resizeAs(input):copy(y) - self.gradInput[torch.cmul(torch.eq(y, -1), torch.gt(input, self.margin))] = 0 - - if (self.sizeAverage == nil or self.sizeAverage == true) then - self.gradInput:mul(1 / input:nElement()) - end - - return self.gradInput -end diff --git a/contrib/lua-torch/nn/Identity.lua b/contrib/lua-torch/nn/Identity.lua deleted file mode 100644 index 5e6ccb6247..0000000000 --- a/contrib/lua-torch/nn/Identity.lua +++ /dev/null @@ -1,30 +0,0 @@ -local Identity, _ = torch.class('nn.Identity', 'nn.Module') - -function Identity:updateOutput(input) - self.output = input - return self.output -end - - -function Identity:updateGradInput(input, gradOutput) - self.gradInput = gradOutput - return self.gradInput -end - -function Identity:clearState() - -- don't call set because it might reset referenced tensors - local function clear(f) - if self[f] then - if torch.isTensor(self[f]) then - self[f] = self[f].new() - elseif type(self[f]) == 'table' then - self[f] = {} - else - self[f] = nil - end - end - end - clear('output') - clear('gradInput') - return self -end diff --git a/contrib/lua-torch/nn/Index.lua b/contrib/lua-torch/nn/Index.lua deleted file mode 100644 index 6aa4297083..0000000000 --- a/contrib/lua-torch/nn/Index.lua +++ /dev/null @@ -1,32 +0,0 @@ -local Index, parent = torch.class('nn.Index', 'nn.Module') - -function Index:__init(dimension) - 
parent.__init(self) - self.dimension = dimension - self.gradInput = {self.gradInput, self.gradInput.new()} -end - -function Index:updateOutput(input) - local t = input[1] - local index = input[2] - self.output:index(t, self.dimension, index) - return self.output -end - -function Index:updateGradInput(input, gradOutput) - local t = input[1] - local index = input[2] - - self.gradInput[2]:resize(index:size()):zero() - local gradInput = self.gradInput[1] -- no gradient for the index variable - gradInput:resizeAs(t):zero() - gradInput:indexAdd(self.dimension, index, gradOutput) - return self.gradInput -end - -function Index:clearState() - self.gradInput[1]:set() - self.gradInput[2]:set() - self.output:set() - return self -end diff --git a/contrib/lua-torch/nn/IndexLinear.lua b/contrib/lua-torch/nn/IndexLinear.lua deleted file mode 100644 index 928e5d3f2c..0000000000 --- a/contrib/lua-torch/nn/IndexLinear.lua +++ /dev/null @@ -1,398 +0,0 @@ -local ffi = require 'ffi' -local IndexLinear, parent = torch.class('nn.IndexLinear', 'nn.Module') - - - -function IndexLinear:__init(inputSize, outputSize, doGradInput, keysOffset, weight, bias, normalize) - parent.__init(self) - - -- We need for 3 extra parameters per feature - -- if we normalize: - -- * The max-abs value - -- * The inverse of the max-abs value - -- * The per-feature bias - -- We keep an extra placeholder for further per learning rate feature manipulation. - -- So it's 4 total. - self.normalize = normalize and 4 or 0 - - -- This is important to keep the possibility of sharing a weight - -- directly, without having to allocate it first. - -- The reason is these weights can be very large. - self.weight = weight or torch.Tensor(inputSize, outputSize + self.normalize):zero() - self.bias = bias or torch.Tensor(outputSize):zero() - self.inputSize = self.weight and self.weight:size(1) or inputSize - self.outputSize = self.weight and (self.weight:size(2)-self.normalize) or outputSize - - -- gradWeight is not initialized as we're doing dense gradient accumulation - -- This is more efficient and avoids allocating a giant useless gradWeight - self.gradWeight = torch.Tensor() - - -- gradBias still works the same as it's already dense - self.gradBias = torch.Tensor(self.outputSize):zero() - - -- Buffers - self.gradWeightBuffer = torch.Tensor() - self.valuesBuffer = torch.Tensor() - self.normalizedValues = torch.Tensor() - - -- That is used to accumulate keys and gradWeight - -- when doing gradients accumulations - self.running = { - cumSumSizes = {}, - keys = {}, - gradWeight = {}, - counter = 1, - } - - -- self.sizes, self.cumSumSizes are calculated on the CPU even when using CUDA. - -- These two tables make it easier to resize these buffers instead of re-allocating them. - -- self.*Cache[1] always contains values on CPU. - -- If CUDA is being used, self.*Cache[2] contains values on GPU. 
- self.sizesCache = {} - self.cumSumSizesCache = {} - - -- A few options - self.weightDecay = 0 - self.doGradInput = doGradInput or false - self.offset = keysOffset and keysOffset-1 or -1 -- if this adds self.offset to indices -end - --- Reset all the parameters needed --- for normalization to 0 -function IndexLinear:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1./math.sqrt(self.weight:size(2)) - end - self.weight:uniform(-stdv, stdv) - self.bias:uniform(-stdv, stdv):mul(0.000001) - if self.normalize and self.normalize > 0 then - self.weight[{{}, {1,self.normalize}}]:zero() - end -end - -function IndexLinear:reshapeInput(input) - assert(type(input) == 'table') - - local ninputs = 0 - for _, v in ipairs(input) do - ninputs = ninputs + 1 - end - - assert(ninputs == 2 or ninputs == 3) - - -- If format is: - -- { - -- torch.LongTensor(size1+size2+...+sizeN), -- concatenated batch of keys - -- torch.Tensor(size1+size2+...+sizeN), -- concatenated batch of values - -- torch.LongTensor(N), -- keys/values sizes (values are {size1, ..., sizeN}) - -- } - if ninputs == 3 then - local fkeys = input[1] - local fvals = input[2] - local fsizes = torch.isTensor(input[3]) and input[3] or fkeys.new{input[3]} - assert(fkeys:nElement() == fvals:nElement(), 'Keys and values should be of same size') - assert(fkeys:dim() == 1, 'Keys and values should be 1D') - self.isFlat = true - self.noBatch = false - return fkeys, fvals, fsizes - end - - local keys = input[1] - local values = input[2] - local lkeys, lvalues - - -- If format is: - -- { - -- { torch.LongTensor(size1), torch.LongTensor(size2), ..., torch.LongTensor(sizeN) }, -- batch of keys - -- { torch.Tensor(size1), torch.Tensor(size2), ..., torch.Tensor(sizeN) }, -- batch of values, - -- } - if type(keys) == 'table' and type(values) == 'table' then - lkeys, lvalues = keys, values - self.isFlat = false - self.noBatch = false - - -- If format is not a batch: - -- { - -- torch.LongTensor(size1), -- keys - -- torch.Tensor(size1), -- values, - -- } - elseif torch.isTensor(keys) and torch.isTensor(values) then - lkeys, lvalues = {keys}, {values} - self.isFlat = false - self.noBatch = true - else - error('Wrong input format.') - end - - for i=1,#lkeys do - assert(lvalues[i]:dim() == 1 and lkeys[i]:dim() == 1, "keys and values should be 1D") - end - - return lkeys, lvalues -end - -function IndexLinear:longTensor(...) - if (self:type() == 'torch.CudaTensor') then - return torch.CudaLongTensor(...) - else - return torch.LongTensor(...) 
-   end
-end
-
-function IndexLinear:flattenInputs(input)
-   local lkeys, lvalues, sizes = self:reshapeInput(input)
-
-   local counter = self.running.counter
-
-   -- Ensure everything is of the right type
-   local isCuda = (self:type() == 'torch.CudaTensor')
-   self.running.keys[counter] = self.running.keys[counter] or self:longTensor()
-   self.keys = self.running.keys[counter]
-
-   if self.isFlat then
-      self.values = self.values or lvalues.new()
-      self.sizes = self.sizes or self:longTensor()
-
-      self.keys:resize(lkeys:size()):copy(lkeys)
-      self.values:resize(lvalues:size()):copy(lvalues)
-      self.sizes = sizes
-      self.cumSumSizes = self.cumSumSizes or self.sizes.new()
-      self.cumSumSizes:cumsum(self.sizes)
-   else
-      self.values = self.values or lvalues[1].new()
-
-      self.lkeys = lkeys
-      self.lvalues = lvalues
-      local batchSize = #self.lkeys
-
-      self.sizesCache[1] = self.sizesCache[1] or torch.LongTensor(batchSize)
-      self.cumSumSizesCache[1] = self.cumSumSizesCache[1] or torch.LongTensor(batchSize)
-
-      self.sizes = self.sizesCache[1]
-      self.cumSumSizes = self.cumSumSizesCache[1]
-
-      self.sizes:resize(batchSize)
-      self.cumSumSizes:resize(batchSize)
-
-      for i = 1,batchSize do
-         self.sizes[i] = self.lkeys[i]:size(1)
-      end
-      self.cumSumSizes:cumsum(self.sizes)
-
-      self.keys:cat(self.lkeys, 1)
-      self.values:cat(self.lvalues, 1)
-
-      if isCuda then
-         -- Get the GPU cache
-         self.sizesCache[2] = self.sizesCache[2] or torch.CudaLongTensor()
-         self.cumSumSizesCache[2] = self.cumSumSizesCache[2] or torch.CudaLongTensor()
-
-         self.sizes = self.sizesCache[2]
-         self.cumSumSizes = self.cumSumSizesCache[2]
-
-         -- Resize and copy to GPU
-         self.sizes:resize(batchSize):copy(self.sizesCache[1])
-         self.cumSumSizes:resize(batchSize):copy(self.cumSumSizesCache[1])
-      end
-   end
-   self.running.cumSumSizes[counter] = self.cumSumSizes
-end
-
-function IndexLinear:updateOutput(input)
-
-   self:flattenInputs(input)
-
-   self.values.THNN.IndexLinear_updateOutput(
-      self.keys:cdata(),
-      self.offset,
-      self.values:cdata(),
-      self.sizes:cdata(),
-      self.cumSumSizes:cdata(),
-      self.output:cdata(),
-      self.weight:cdata(),
-      self.bias:cdata(),
-      self.normalizedValues:cdata(),
-      self.train and 1 or 0
-   )
-
-   if self.noBatch then
-      self.output:resize(self.output:size(2))
-   end
-   return self.output
-end
-
-function IndexLinear:accUpdateGradParameters(input, gradOutput, scale)
-   self.values.THNN.IndexLinear_accUpdateGradParameters(
-      self.keys:cdata(),
-      self.offset,
-      self.normalize > 0 and self.normalizedValues:cdata() or self.values:cdata(),
-      self.sizes:cdata(),
-      self.cumSumSizes:cdata(),
-      gradOutput:cdata(),
-      self.weight:cdata(),
-      self.bias:cdata(),
-      self.weightDecay or 0,
-      scale or 1
-   )
-end
-
-function IndexLinear:accGradParameters(input, gradOutput, scale)
-
-   local counter = self.running.counter
-
-   -- As with running.keys in updateOutput, keep a table of dense
-   -- running.gradWeight buffers, one per forward pass
-   self.running.gradWeight[counter] = self.running.gradWeight[counter] or self.values.new()
-   self.values.THNN.IndexLinear_accGradParameters(
-      self.keys:cdata(),
-      self.offset,
-      self.normalize > 0 and self.normalizedValues:cdata() or self.values:cdata(),
-      self.sizes:cdata(),
-      self.cumSumSizes:cdata(),
-      gradOutput:cdata(),
-      self.running.gradWeight[counter]:cdata(),
-      self.gradBias:cdata(),
-      self.weight:cdata(),
-      self.bias:cdata(),
-      self.valuesBuffer:cdata(),
-      self.weightDecay or 0,
-      scale or 1
-   )
-
-   -- Increment the running counter to create a new buffer
-   -- if we don't flush them in zeroGradParameters
-   self.running.counter = self.running.counter + 1
-end
-
-function IndexLinear:updateGradInput(input, gradOutput)
-   self.gradInput = {}
-   -- Revamped from nn.SparseLinear.updateGradInput
-   if self.doGradInput and self.normalize > 0 then
-      error('updateGradInput is not implemented in max-normalize mode')
-   end
-
-   local ini = self.weight:size(1)
-
-   if self.doGradInput then
-      local gi = gradOutput.new()
-      if gradOutput:dim() == 1 then
-         gi:resize(self.weight:size(1))
-         gi:mv(self.weight,gradOutput)
-         gi:resize(1, self.weight:size(1))
-      elseif gradOutput:dim() == 2 then
-         gi:resize(gradOutput:size(1), self.weight:size(1))
-         gi:mm(gradOutput, self.weight:t())
-      end
-
-      local indices = self.running.keys[1].new(ini):range(1, ini)
-
-      if self.isFlat then
-         self.gradInput[1] = torch.repeatTensor(indices, gi:size(1), 1)
-         self.gradInput[2] = gi
-      else
-         self.gradInput[1] = {}
-         self.gradInput[2] = {}
-         for i = 1,gi:size(1) do
-            self.gradInput[1][i] = self.running.keys[1].new(ini)
-            self.gradInput[1][i]:copy(indices)
-            self.gradInput[2][i] = gradOutput.new(ini)
-            self.gradInput[2][i]:copy(gi[i])
-         end
-      end
-   end
-
-   if self.noBatch then
-      if self.isFlat then
-         self.gradInput = {self.gradInput[1]:resize(ini), self.gradInput[2]:resize(ini)}
-      else
-         self.gradInput = {self.gradInput[1][1], self.gradInput[2][1]}
-      end
-   end
-   return self.gradInput
-end
-
-function IndexLinear:updateParameters(lr)
-   local counter = self.running.counter
-   if counter > 1 then
-      if counter == 2 then
-         self.updateKeys = self.running.keys[1]
-         self.gradWeight = self.running.gradWeight[1]
-      else
-         self.updateKeysBuffer = self.updateKeysBuffer or self:longTensor()
-         local lkeys = {}
-         local lgweights = {}
-         local totalSize = 0
-         local lCumSumSizes = {}
-         for i=1,counter-1 do
-            lkeys[i] = self.running.keys[i]
-            -- Change layout to take advantage of the 1-D contiguous torch.cat
-            lgweights[i] = self.running.gradWeight[i]:contiguous()
-            lgweights[i]:resize(lgweights[i]:nElement())
-            lCumSumSizes[i] = totalSize + self.running.cumSumSizes[i]
-            totalSize = totalSize + lkeys[i]:size(1)
-         end
-
-         self.updateKeysBuffer:cat(lkeys, 1)
-         self.gradWeightBuffer:cat(lgweights, 1)
-         self.cumSumSizes:cat(lCumSumSizes, 1)
-         self.gradWeightBuffer:resize(totalSize, self.outputSize)
-         self.gradWeight = self.gradWeightBuffer
-         self.updateKeys = self.updateKeysBuffer
-      end
-      self.values.THNN.IndexLinear_updateParameters(
-         self.gradWeight:cdata(),
-         self.gradBias:cdata(),
-         self.weight:cdata(),
-         self.bias:cdata(),
-         self.updateKeys:cdata(),
-         self.cumSumSizes:cdata(),
-         self.offset,
-         self.weightDecay or 0,
-         lr or error('You must specify a learning rate')
-      )
-   end
-end
-
-function IndexLinear:zeroGradParameters()
-   -- No need to do anything here as gradWeight is dense
-   self.gradBias:zero()
-
-   -- The below piece of code would reset
-   -- the smart scaling parameters for each feature
-   -- each time we call zeroGradParameters
-   -- TODO: decide what to do with that piece of code.
-   -- NB: this should be commented along with the corresponding
-   -- piece of code in lib/THNN/generic/IndexLinear.c, in the accUpdateGradParameters function.
- - --[[ - local w = self.weight:select(2, 3) - if self.updateKeys and self.updateKeys:nElement() > 0 then - self.updateKeysBuffer:resizeAs(self.updateKeys):copy(self.updateKeys):add(self.offset+1) - w:indexFill(1, self.updateKeysBuffer, 0) - end - ]]-- - self.running.counter = 1 -end - -function IndexLinear:parameters() - return {self.weight, self.bias}, {self.running, self.gradBias} -end - -function IndexLinear:clearState() - self.running.keys = {} - self.running.gradWeight = {} - self.keys = nil - self.zerokeys = nil - self.updateKeys = nil - self.values = nil - self.sizes = nil - self.lkeys = {} - self.lvalues = {} - self.gradWeightBuffer = self.gradWeightBuffer.new() - self.valuesBuffer = self.valuesBuffer.new() - self.updateKeysBuffer = nil - self.values = nil - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/Jacobian.lua b/contrib/lua-torch/nn/Jacobian.lua deleted file mode 100644 index 4f728b18c5..0000000000 --- a/contrib/lua-torch/nn/Jacobian.lua +++ /dev/null @@ -1,389 +0,0 @@ -nn.Jacobian = {} - -function nn.Jacobian.backward(module, input, param, dparam) - local doparam = 0 - if param then - doparam = 1 - end - param = param or input - -- output deriv - module:forward(input) - local dout = module.output.new():resizeAs(module.output) - -- 1D view - local sdout = module.output.new(dout:storage(),1,dout:nElement()) - -- jacobian matrix to calculate - local jacobian = torch.Tensor(param:nElement(),dout:nElement()):zero() - - for i=1,sdout:nElement() do - dout:zero() - sdout[i] = 1 - module:zeroGradParameters() - local din = module:updateGradInput(input, dout) - module:accGradParameters(input, dout) - if doparam == 1 then - jacobian:select(2,i):copy(dparam) - else - jacobian:select(2,i):copy(din) - end - end - return jacobian -end - -function nn.Jacobian.backwardUpdate(module, input, param) - - -- output deriv - module:forward(input) - local dout = module.output.new():resizeAs(module.output) - -- 1D view - local sdout = module.output.new(dout:storage(),1,dout:nElement()) - -- jacobian matrix to calculate - local jacobian = torch.Tensor(param:nElement(),dout:nElement()):zero() - - -- original param - local params = module:parameters() - local origparams = {} - for j=1,#params do - table.insert(origparams, params[j]:clone()) - end - - for i=1,sdout:nElement() do - for j=1,#params do - params[j]:copy(origparams[j]) - end - dout:zero() - sdout[i] = 1 - module:updateGradInput(input, dout) - module:accUpdateGradParameters(input, dout, 1) - jacobian:select(2,i):copy(param) - end - - for j=1,#params do - params[j]:copy(origparams[j]) - end - - return jacobian -end - -function nn.Jacobian.forward(module, input, param, perturbation) - param = param or input - -- perturbation amount - perturbation = perturbation or 1e-6 - -- 1D view of input - --local tst = param:storage() - local sin = param.new(param):resize(param:nElement())--param.new(tst,1,tst:size()) - -- jacobian matrix to calculate - local jacobian = torch.Tensor():resize(param:nElement(),module:forward(input):nElement()) - - local outa = torch.Tensor(jacobian:size(2)) - local outb = torch.Tensor(jacobian:size(2)) - - for i=1,sin:nElement() do - local orig = sin[i] - sin[i] = orig - perturbation - outa:copy(module:forward(input)) - sin[i] = orig + perturbation - outb:copy(module:forward(input)) - sin[i] = orig - - outb:add(-1,outa):div(2*perturbation) - jacobian:select(1,i):copy(outb) - end - - return jacobian -end - -function nn.Jacobian.backwardDiagHessian(module, input, diagHessianParamName) - -- Compute the 
second derivatives (diagonal Hessian elements)
-   -- by backpropagation (using the code from hessian.lua).
-   --
-   -- This function computes the diagonal Hessian elements of the following function:
-   --
-   -- F(x_1, x_2, ..., x_n) = y_1^2/2 + y_2^2/2 + ... + y_m^2/2,
-   --
-   -- where
-   -- x_1, ..., x_n are the input values and parameters of the given module,
-   -- y_1, ..., y_m are the output values of the given module.
-   --
-   -- All x_i and y_i values are scalars here. In other words,
-   -- x_1, ..., x_n denote the scalar elements of the module input tensor,
-   -- the scalar elements of module.weight,
-   -- and the scalar elements of module.bias;
-   -- y_1, ..., y_m are the scalar elements of the module output tensor.
-   --
-   -- The diagonal Hessian elements of F are computed with respect to
-   -- the module input values and parameters (x_1, .., x_n).
-   --
-   -- The function F is chosen for its convenient properties:
-   --
-   -- dF / dy_i = y_i,
-   -- d^2F / dy_i^2 = 1.
-   --
-   -- In other words, the diagonal Hessian elements of F with respect
-   -- to the module OUTPUT values (y_1, ... y_m) are equal to 1.
-   --
-   -- Because of that, computing the diagonal Hessian elements of F
-   -- with respect to the module INPUT values and PARAMETERS (x_1, ..., x_n)
-   -- can be done by calling updateDiagHessianInput() and accDiagHessianParameters()
-   -- using a tensor of ones as diagHessianOutput.
-
-   module:forward(input)
-   local diagHessianOutput = module.output.new():resizeAs(module.output):fill(1)
-
-   module.diagHessianWeight:zero()
-   module.diagHessianBias:zero()
-   module:updateDiagHessianInput(input, diagHessianOutput)
-   module:accDiagHessianParameters(input, diagHessianOutput)
-
-   return module[diagHessianParamName]
-end
-
-function nn.Jacobian.linearModuleDiagHessian(module, input, gradParamName)
-   -- Compute the second derivatives (diagonal Hessian elements)
-   -- from the first derivatives for the given module
-   -- (without using the code from hessian.lua).
-   --
-   -- The given module is assumed to be linear with respect to its inputs and weights
-   -- (like nn.Linear, nn.SpatialConvolution, etc.)
-   --
-   -- This function computes the diagonal Hessian elements of the following function:
-   --
-   -- F(x_1, x_2, ..., x_n) = y_1^2/2 + y_2^2/2 + ... + y_m^2/2.
-   --
-   -- (See the comment for nn.Jacobian.backwardDiagHessian() for explanation.)
-   --
-   -- The first derivatives of F with respect to
-   -- the module inputs and parameters (x_1, ..., x_n) are:
-   --
-   -- dF / dx_i = \sum_k (dF / dy_k) (dy_k / dx_i).
-   --
-   -- The second derivatives are:
-   --
-   -- d^2F / dx_i^2 = \sum_k [(d^2F / dy_k^2) (dy_k / dx_i)^2 + (dF / dy_k) (d^2y_k / dx_i^2)].
-   --
-   -- The second derivatives of F with respect to the module outputs (y_1, ..., y_m)
-   -- are equal to 1, so:
-   --
-   -- d^2F / dx_i^2 = \sum_k [(dy_k / dx_i)^2 + (dF / dy_k) (d^2y_k / dx_i^2)].
-   --
-   -- Assuming the linearity of module outputs (y_1, ..., y_m)
-   -- with respect to module inputs and parameters (x_1, ..., x_n),
-   -- we have (d^2y_k / dx_i^2) = 0,
-   -- and the expression finally becomes:
-   --
-   -- d^2F / dx_i^2 = \sum_k (dy_k / dx_i)^2.
-   --
-   -- The first derivatives (dy_k / dx_i) are computed by normal backpropagation,
-   -- using updateGradInput() and accGradParameters().
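For a concrete instance of the identity above: for a plain linear map y = W*x we have dy_k/dx_i = W[k][i], so the diagonal Hessian of F with respect to x is the column-wise sum of the squared weights. A minimal sketch (illustrative values, assuming only stock torch):

   local W = torch.Tensor{{1, 2}, {3, 4}}
   -- summing the squared weights down each column gives d^2F / dx_i^2
   local diagH = torch.cmul(W, W):sum(1):squeeze()
   print(diagH) -- 10  20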
- - local gradParam = module[gradParamName] - - local diagHessian = gradParam.new():resize(gradParam:nElement()):zero() - - module:forward(input) - local gradOutput = module.output.new():resizeAs(module.output) - local gradOutput1D = gradOutput:view(gradOutput:nElement()) - - for i=1,gradOutput:nElement() do - gradOutput1D:zero() - gradOutput1D[i] = 1 - module.gradWeight:zero() - if module.bias then - module.gradBias:zero() - end - module:updateGradInput(input, gradOutput) - module:accGradParameters(input, gradOutput) - diagHessian:addcmul(gradParam, gradParam) - end - - return diagHessian -end - -function nn.Jacobian.forwardUpdate(module, input, param, perturbation) - -- perturbation amount - perturbation = perturbation or 1e-6 - -- 1D view of input - --local tst = param:storage() - local sin = param.new(param):resize(param:nElement())--param.new(tst,1,tst:size()) - -- jacobian matrix to calculate - local jacobian = torch.Tensor():resize(param:nElement(),module:forward(input):nElement()) - - local outa = torch.Tensor(jacobian:size(2)) - local outb = torch.Tensor(jacobian:size(2)) - - for i=1,sin:nElement() do - local orig = sin[i] - sin[i] = orig - perturbation - outa:copy(module:forward(input)) - sin[i] = orig + perturbation - outb:copy(module:forward(input)) - sin[i] = orig - - outb:add(-1,outa):div(2*perturbation) - jacobian:select(1,i):copy(outb) - jacobian:select(1,i):mul(-1) - jacobian:select(1,i):add(sin[i]) - end - return jacobian -end - -function nn.Jacobian.testJacobian(module, input, minval, maxval, perturbation) - minval = minval or -2 - maxval = maxval or 2 - local inrange = maxval - minval - input:copy(torch.rand(input:nElement()):mul(inrange):add(minval)) - local jac_fprop = nn.Jacobian.forward(module, input, input, perturbation) - local jac_bprop = nn.Jacobian.backward(module, input) - local error = jac_fprop-jac_bprop - return error:abs():max() -end - -function nn.Jacobian.testJacobianParameters(module, input, param, dparam, minval, maxval, perturbation) - minval = minval or -2 - maxval = maxval or 2 - local inrange = maxval - minval - input:copy(torch.rand(input:nElement()):mul(inrange):add(minval)) - param:copy(torch.rand(param:nElement()):mul(inrange):add(minval)) - local jac_bprop = nn.Jacobian.backward(module, input, param, dparam) - local jac_fprop = nn.Jacobian.forward(module, input, param, perturbation) - local error = jac_fprop - jac_bprop - return error:abs():max() -end - -function nn.Jacobian.testJacobianUpdateParameters(module, input, param, minval, maxval, perturbation) - minval = minval or -2 - maxval = maxval or 2 - local inrange = maxval - minval - input:copy(torch.rand(input:nElement()):mul(inrange):add(minval)) - param:copy(torch.rand(param:nElement()):mul(inrange):add(minval)) - local params_bprop = nn.Jacobian.backwardUpdate(module, input, param) - local params_fprop = nn.Jacobian.forwardUpdate(module, input, param, perturbation) - - local error = params_fprop - params_bprop - return error:abs():max() -end - -function nn.Jacobian.testDiagHessian(module, input, gradParamName, diagHessianParamName, minval, maxval) - -- Compute the diagonal Hessian elements for the same function in two different ways, - -- then compare the results and return the difference. 
- - minval = minval or -2 - maxval = maxval or 2 - local inrange = maxval - minval - input:copy(torch.rand(input:nElement()):mul(inrange):add(minval)) - module:initDiagHessianParameters() - local h_bprop = nn.Jacobian.backwardDiagHessian(module, input, diagHessianParamName) - local h_linearmodule = nn.Jacobian.linearModuleDiagHessian(module, input, gradParamName) - local error = h_bprop - h_linearmodule - return error:abs():max() -end - -function nn.Jacobian.testDiagHessianInput(module, input, minval, maxval) - return nn.Jacobian.testDiagHessian(module, input, 'gradInput', 'diagHessianInput', minval, maxval) -end - -function nn.Jacobian.testDiagHessianWeight(module, input, minval, maxval) - return nn.Jacobian.testDiagHessian(module, input, 'gradWeight', 'diagHessianWeight', minval, maxval) -end - -function nn.Jacobian.testDiagHessianBias(module, input, minval, maxval) - return nn.Jacobian.testDiagHessian(module, input, 'gradBias', 'diagHessianBias', minval, maxval) -end - -function nn.Jacobian.testIO(module,input, minval, maxval) - minval = minval or -2 - maxval = maxval or 2 - local inrange = maxval - minval - local inputclone = input:clone() - - -- run module - module:forward(input) - local go = module.output:clone():copy(torch.rand(module.output:nElement()):mul(inrange):add(minval)) - local goclone = go:clone() - module:zeroGradParameters() - module:updateGradInput(input,go) - module:accGradParameters(input,go) - - local fo = module.output:clone() - local bo = module.gradInput:clone() - - -- write module - local filename = os.tmpname() - local f = torch.DiskFile(filename, 'w'):binary() - -- call clearState and check that it returns itself - assert(module == module:clearState(),'clearState did not return self') - f:writeObject(module) - f:close() - -- read module - local m = torch.DiskFile(filename):binary():readObject() - m:forward(inputclone) - m:zeroGradParameters() - m:updateGradInput(inputclone,goclone) - m:accGradParameters(inputclone,goclone) - -- cleanup - os.remove(filename) - - local fo2 = m.output:clone() - local bo2 = m.gradInput:clone() - - local errf = fo - fo2 - local errb = bo - bo2 - return errf:abs():max(), errb:numel() == 0 and 0 or errb:abs():max() -end - -function nn.Jacobian.testAllUpdate(module, input, weight, gradWeight) - local gradOutput - local lr = torch.uniform(0.1, 1) - local errors = {} - - -- accGradParameters - local maccgp = module:clone() - local weightc = maccgp[weight]:clone() - maccgp:forward(input) - gradOutput = torch.rand(maccgp.output:size()) - maccgp:zeroGradParameters() - maccgp:updateGradInput(input, gradOutput) - maccgp:accGradParameters(input, gradOutput) - maccgp:updateParameters(lr) - errors["accGradParameters"] = (weightc-maccgp[gradWeight]*lr-maccgp[weight]):norm() - - -- accUpdateGradParameters - local maccugp = module:clone() - maccugp:forward(input) - maccugp:updateGradInput(input, gradOutput) - maccugp:accUpdateGradParameters(input, gradOutput, lr) - errors["accUpdateGradParameters"] = (maccugp[weight]-maccgp[weight]):norm() - - -- shared, accGradParameters - local macsh1 = module:clone() - local macsh2 = module:clone() - macsh2:share(macsh1, weight) - macsh1:forward(input) - macsh2:forward(input) - macsh1:zeroGradParameters() - macsh2:zeroGradParameters() - macsh1:updateGradInput(input, gradOutput) - macsh2:updateGradInput(input, gradOutput) - macsh1:accGradParameters(input, gradOutput) - macsh2:accGradParameters(input, gradOutput) - macsh1:updateParameters(lr) - macsh2:updateParameters(lr) - local err = 
(weightc-maccgp[gradWeight]*(lr*2)-macsh1[weight]):norm() - err = err + (weightc-maccgp[gradWeight]*(lr*2)-macsh2[weight]):norm() - errors["accGradParameters [shared]"] = err - - -- shared, accUpdateGradParameters - local macshu1 = module:clone() - local macshu2 = module:clone() - macshu2:share(macshu1, weight) - macshu1:forward(input) - macshu2:forward(input) - macshu1:updateGradInput(input, gradOutput) - macshu2:updateGradInput(input, gradOutput) - macshu1:accUpdateGradParameters(input, gradOutput, lr) - macshu2:accUpdateGradParameters(input, gradOutput, lr) - err = (weightc-maccgp[gradWeight]*(lr*2)-macshu1[weight]):norm() - err = err + (weightc-maccgp[gradWeight]*(lr*2)-macshu2[weight]):norm() - errors["accUpdateGradParameters [shared]"] = err - - return errors -end diff --git a/contrib/lua-torch/nn/JoinTable.lua b/contrib/lua-torch/nn/JoinTable.lua deleted file mode 100644 index 6ab68e189d..0000000000 --- a/contrib/lua-torch/nn/JoinTable.lua +++ /dev/null @@ -1,74 +0,0 @@ -local JoinTable, parent = torch.class('nn.JoinTable', 'nn.Module') - -function JoinTable:__init(dimension, nInputDims) - parent.__init(self) - self.size = torch.LongStorage() - self.dimension = dimension - self.gradInput = {} - self.nInputDims = nInputDims -end - -function JoinTable:_getPositiveDimension(input) - local dimension = self.dimension - if dimension < 0 then - dimension = input[1]:dim() + dimension + 1 - elseif self.nInputDims and input[1]:dim()==(self.nInputDims+1) then - dimension = dimension + 1 - end - return dimension -end - -function JoinTable:updateOutput(input) - local dimension = self:_getPositiveDimension(input) - - for i=1,#input do - local currentOutput = input[i] - if i == 1 then - self.size:resize(currentOutput:dim()):copy(currentOutput:size()) - else - self.size[dimension] = self.size[dimension] - + currentOutput:size(dimension) - end - end - self.output:resize(self.size) - - local offset = 1 - for i=1,#input do - local currentOutput = input[i] - self.output:narrow(dimension, offset, - currentOutput:size(dimension)):copy(currentOutput) - offset = offset + currentOutput:size(dimension) - end - return self.output -end - -function JoinTable:updateGradInput(input, gradOutput) - local dimension = self:_getPositiveDimension(input) - - for i=1,#input do - if self.gradInput[i] == nil then - self.gradInput[i] = input[i].new() - end - self.gradInput[i]:resizeAs(input[i]) - end - - -- clear out invalid gradInputs - for i=#input+1, #self.gradInput do - self.gradInput[i] = nil - end - - local offset = 1 - for i=1,#input do - local currentOutput = input[i] - local currentGradInput = gradOutput:narrow(dimension, offset, - currentOutput:size(dimension)) - self.gradInput[i]:copy(currentGradInput) - offset = offset + currentOutput:size(dimension) - end - return self.gradInput -end - -function JoinTable:type(type, tensorCache) - self.gradInput = {} - return parent.type(self, type, tensorCache) -end diff --git a/contrib/lua-torch/nn/Kmeans.lua b/contrib/lua-torch/nn/Kmeans.lua deleted file mode 100644 index 56066b63d9..0000000000 --- a/contrib/lua-torch/nn/Kmeans.lua +++ /dev/null @@ -1,215 +0,0 @@ --- Online (Hard) Kmeans layer. 
-local Kmeans, parent = torch.class('nn.Kmeans', 'nn.Module')
-
-function Kmeans:__init(k, dim, scale)
-   parent.__init(self)
-   self.k = k
-   self.dim = dim
-
-   -- scale for online k-means update
-   self.scale = scale
-
-   assert(k > 0, "Clusters cannot be 0 or negative.")
-   assert(dim > 0, "Dimensionality cannot be 0 or negative.")
-
-   -- Kmeans centers -> self.weight
-   self.weight = torch.Tensor(self.k, self.dim)
-
-   self.gradWeight = torch.Tensor(self.weight:size())
-   self.loss = 0 -- within cluster error of the last forward
-
-   self.clusterSampleCount = torch.Tensor(self.k)
-
-   self:reset()
-end
-
--- Reset
-function Kmeans:reset(stdev)
-   stdev = stdev or 1
-   self.weight:uniform(-stdev, stdev)
-end
-
--- Initialize Kmeans weight with random samples from input.
-function Kmeans:initRandom(input)
-   local inputDim = input:nDimension()
-   assert(inputDim == 2, "Incorrect input dimensionality. Expecting 2D.")
-
-   local noOfSamples = input:size(1)
-   local dim = input:size(2)
-   assert(dim == self.dim, "Dimensionality of input and weight don't match.")
-   assert(noOfSamples >= self.k, "Need at least k samples for initialization.")
-
-   local indices = torch.zeros(self.k)
-   indices:random(1, noOfSamples)
-
-   for i=1, self.k do
-      self.weight[i]:copy(input[indices[i]])
-   end
-end
-
--- Initialize using Kmeans++
-function Kmeans:initKmeansPlus(input, p)
-   self.p = p or self.p or 0.95
-   assert(self.p>=0 and self.p<=1, "P value should be between 0 and 1.")
-
-   local inputDim = input:nDimension()
-   assert(inputDim == 2, "Incorrect input dimensionality. Expecting 2D.")
-   local noOfSamples = input:size(1)
-
-   local pcount = math.ceil((1-self.p)*noOfSamples)
-   if pcount <= 0 then pcount = 1 end
-
-   local initializedK = 1
-   self.weight[initializedK]:copy(input[torch.random(noOfSamples)])
-   initializedK = initializedK + 1
-
-   local clusters = self.weight.new()
-   local clusterDistances = self.weight.new()
-   local temp = self.weight.new()
-   local expandedSample = self.weight.new()
-   local distances = self.weight.new()
-   distances:resize(noOfSamples):fill(math.huge)
-   local maxScores = self.weight.new()
-   local maxIndx = self.weight.new()
-
-   for k=initializedK, self.k do
-      clusters = self.weight[{{initializedK-1, initializedK-1}}]
-      for i=1, noOfSamples do
-         temp:expand(input[{{i}}], 1, self.dim)
-         expandedSample:resize(temp:size()):copy(temp)
-
-         -- Squared Euclidean distance
-         expandedSample:add(-1, clusters)
-         clusterDistances:norm(expandedSample, 2, 2)
-         clusterDistances:pow(2)
-         distances[i] = math.min(clusterDistances:min(), distances[i])
-      end
-      maxScores, maxIndx = distances:sort(true)
-      local tempIndx = torch.random(pcount)
-      local indx = maxIndx[tempIndx]
-      self.weight[initializedK]:copy(input[indx])
-      initializedK = initializedK + 1
-   end
-end
-
-local function isCudaTensor(tensor)
-   local typename = torch.typename(tensor)
-   if typename and typename:find('torch%.Cuda.*Tensor') then
-      return true
-   end
-   return false
-end
-
--- Kmeans updateOutput (forward)
-function Kmeans:updateOutput(input)
-   local inputDim = input:nDimension()
-   assert(inputDim == 2, "Incorrect input dimensionality. Expecting 2D.")
-
-   local batchSize = input:size(1)
-   local dim = input:size(2)
-   assert(dim == self.dim, "Dimensionality of input and weight don't match.")
-
-   assert(input:isContiguous(), "Input is not contiguous.")
-
-   -- a sample copied k times to compute distance between sample and weight
-   self._expandedSamples = self._expandedSamples or self.weight.new()
-
-   -- distance between a sample and weight
-   self._clusterDistances = self._clusterDistances or self.weight.new()
-
-   self._temp = self._temp or input.new()
-   self._tempExpanded = self._tempExpanded or input.new()
-
-   -- Expanding inputs
-   self._temp:view(input, 1, batchSize, self.dim)
-   self._tempExpanded:expand(self._temp, self.k, batchSize, self.dim)
-   self._expandedSamples:resize(self.k, batchSize, self.dim)
-                        :copy(self._tempExpanded)
-
-   -- Expanding weights
-   self._tempWeight = self._tempWeight or self.weight.new()
-   self._tempWeightExp = self._tempWeightExp or self.weight.new()
-   self._expandedWeight = self._expandedWeight or self.weight.new()
-   self._tempWeight:view(self.weight, self.k, 1, self.dim)
-   self._tempWeightExp:expand(self._tempWeight, self._expandedSamples:size())
-   self._expandedWeight:resize(self.k, batchSize, self.dim)
-                       :copy(self._tempWeightExp)
-
-   -- x-c
-   self._expandedSamples:add(-1, self._expandedWeight)
-   -- Squared Euclidean distance
-   self._clusterDistances:norm(self._expandedSamples, 2, 3)
-   self._clusterDistances:pow(2)
-   self._clusterDistances:resize(self.k, batchSize)
-
-   self._minScore = self._minScore or self.weight.new()
-   self._minIndx = self._minIndx or (isCudaTensor(input) and torch.CudaLongTensor() or torch.LongTensor())
-   self._minScore:min(self._minIndx, self._clusterDistances, 1)
-   self._minIndx:resize(batchSize)
-
-   self.output:resize(batchSize):copy(self._minIndx)
-   self.loss = self._minScore:sum()
-
-   return self.output
-end
-
--- Kmeans has its own criterion, hence gradInput is zero
-function Kmeans:updateGradInput(input, gradOutput)
-   self.gradInput:resize(input:size()):zero()
-
-   return self.gradInput
-end
-
--- We define the k-means update rule as c -> c + scale * 1/n * sum_i (x-c),
--- where n is the number of samples x assigned to center c.
--- With this update rule, gradient descent expects gradWeight to hold the
--- negative of that update direction.
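As a worked instance of that update rule (made-up numbers, independent of the module): with one center c = {0, 0}, three assigned samples, and scale = 0.5, the center moves halfway toward the samples' mean:

   local c = torch.Tensor{0, 0}
   local x = torch.Tensor{{1, 0}, {0, 1}, {1, 1}}
   local scale = 0.5
   local diff = x - c:view(1, 2):expandAs(x)  -- (x - c) for each sample
   c:add(scale, diff:mean(1):squeeze())       -- c + scale * 1/n * sum_i (x - c)
   print(c) -- 0.3333  0.3333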
-function Kmeans:accGradParameters(input, gradOutput, scale) - local scale = self.scale or scale or 1 - assert(scale > 0 , " Scale has to be positive.") - - -- Update cluster sample count - local batchSize = input:size(1) - self._cscAdder = self._cscAdder or self.weight.new() - self._cscAdder:resize(batchSize):fill(1) - self.clusterSampleCount:zero() - self.clusterSampleCount:indexAdd(1, self._minIndx, self._cscAdder) - - -- scale * (x[k]-c[k]) where k is nearest cluster to x - self._gradWeight = self._gradWeight or self.gradWeight.new() - self._gradWeight:index(self.weight, 1, self._minIndx) - self._gradWeight:mul(-1) - self._gradWeight:add(input) - self._gradWeight:mul(-scale) - - self._gradWeight2 = self._gradWeight2 or self.gradWeight.new() - self._gradWeight2:resizeAs(self.gradWeight):zero() - self._gradWeight2:indexAdd(1, self._minIndx, self._gradWeight) - - -- scale/n * sum_i (x-c) - self._ccounts = self._ccounts or self.clusterSampleCount.new() - self._ccounts:resize(self.k):copy(self.clusterSampleCount) - self._ccounts:add(0.0000001) -- prevent division by zero errors - - self._gradWeight2:cdiv(self._ccounts:view(self.k,1):expandAs(self.gradWeight)) - - self.gradWeight:add(self._gradWeight2) -end - -function Kmeans:clearState() - -- prevent premature memory allocations - self._expandedSamples = nil - self._clusterDistances = nil - self._temp = nil - self._tempExpanded = nil - self._tempWeight = nil - self._tempWeightExp = nil - self._expandedWeight = nil - self._minScore = nil - self._minIndx = nil - self._cscAdder = nil -end - -function Kmeans:type(type, tensorCache) - self:clearState() - return parent.type(self, type, tensorCache) -end diff --git a/contrib/lua-torch/nn/L1Cost.lua b/contrib/lua-torch/nn/L1Cost.lua deleted file mode 100644 index 6b58e0ec95..0000000000 --- a/contrib/lua-torch/nn/L1Cost.lua +++ /dev/null @@ -1,30 +0,0 @@ -local THNN = require 'nn.THNN' -local L1Cost, parent = torch.class('nn.L1Cost','nn.Criterion') - -function L1Cost:__init() - parent.__init(self) -end - -function L1Cost:updateOutput(input) - self.output_tensor = self.output_tensor or input.new(1) - input.THNN.L1Cost_updateOutput( - input:cdata(), - self.output_tensor:cdata() - ) - self.output = self.output_tensor[1] - return self.output -end - -function L1Cost:updateGradInput(input) - input.THNN.L1Cost_updateGradInput( - input:cdata(), - THNN.NULL, - self.gradInput:cdata() - ) - return self.gradInput -end - -function L1Cost:clearState() - if self.output_tensor then self.output_tensor:set() end - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/L1HingeEmbeddingCriterion.lua b/contrib/lua-torch/nn/L1HingeEmbeddingCriterion.lua deleted file mode 100644 index 6957278f5d..0000000000 --- a/contrib/lua-torch/nn/L1HingeEmbeddingCriterion.lua +++ /dev/null @@ -1,41 +0,0 @@ -local L1HingeEmbeddingCriterion, parent = torch.class('nn.L1HingeEmbeddingCriterion', 'nn.Criterion') - -function L1HingeEmbeddingCriterion:__init(margin) - parent.__init(self) - margin = margin or 1 - self.margin = margin - self.gradInput = {torch.Tensor(), torch.Tensor()} -end - -function L1HingeEmbeddingCriterion:updateOutput(input,y) - self.output=input[1]:dist(input[2],1); - if y == -1 then - self.output = math.max(0,self.margin - self.output); - end - return self.output -end - - -local function mathsign(t) - if t>0 then return 1; end - if t<0 then return -1; end - return 2*torch.random(2)-3; -end - -function L1HingeEmbeddingCriterion:updateGradInput(input, y) - self.gradInput[1]:resizeAs(input[1]) - 
self.gradInput[2]:resizeAs(input[2]) - self.gradInput[1]:copy(input[1]) - self.gradInput[1]:add(-1, input[2]) - local dist = self.gradInput[1]:norm(1); - self.gradInput[1]:apply(mathsign) -- L1 gradient - if y == -1 then -- just to avoid a mul by 1 - if dist > self.margin then - self.gradInput[1]:zero() - else - self.gradInput[1]:mul(-1) - end - end - self.gradInput[2]:zero():add(-1, self.gradInput[1]) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/L1Penalty.lua b/contrib/lua-torch/nn/L1Penalty.lua deleted file mode 100644 index 9ee6b35ff2..0000000000 --- a/contrib/lua-torch/nn/L1Penalty.lua +++ /dev/null @@ -1,42 +0,0 @@ -local L1Penalty, parent = torch.class('nn.L1Penalty','nn.Module') - ---This module acts as an L1 latent state regularizer, adding the ---[gradOutput] to the gradient of the L1 loss. The [input] is copied to ---the [output]. - -function L1Penalty:__init(l1weight, sizeAverage, provideOutput) - parent.__init(self) - self.l1weight = l1weight - self.sizeAverage = sizeAverage or false - if provideOutput == nil then - self.provideOutput = true - else - self.provideOutput = provideOutput - end -end - -function L1Penalty:updateOutput(input) - local m = self.l1weight - if self.sizeAverage == true then - m = m/input:nElement() - end - local loss = m*input:norm(1) - self.loss = loss - self.output = input - return self.output -end - -function L1Penalty:updateGradInput(input, gradOutput) - local m = self.l1weight - if self.sizeAverage == true then - m = m/input:nElement() - end - - self.gradInput:resizeAs(input):copy(input):sign():mul(m) - - if self.provideOutput == true then - self.gradInput:add(gradOutput) - end - - return self.gradInput -end diff --git a/contrib/lua-torch/nn/LayerNormalization.lua b/contrib/lua-torch/nn/LayerNormalization.lua deleted file mode 100644 index 722d7c8020..0000000000 --- a/contrib/lua-torch/nn/LayerNormalization.lua +++ /dev/null @@ -1,27 +0,0 @@ --- Reference: https://arxiv.org/pdf/1607.06450.pdf (Section 3) - -local LayerNormalization, parent = torch.class('nn.LayerNormalization', 'nn.Sequential') -function LayerNormalization:__init(nOutput, bias, eps, affine) - parent.__init(self) - eps = eps or 1e-10 - affine = (affine == nil) and true or affine - bias = bias or 0 - - self:add(nn.ConcatTable() - :add(nn.Identity()) - :add(nn.Sequential() - :add(nn.Mean(1, 1)) - :add(nn.Replicate(nOutput,1,1)))) - :add(nn.CSubTable()) - :add(nn.Normalize(2, eps)) - :add(nn.MulConstant(torch.sqrt(nOutput))) - - if affine then - local biasTransform = nn.Add(nOutput, false) - biasTransform.bias:fill(bias) - local gainTransform = nn.CMul(nOutput) - gainTransform.weight:fill(1.) 
-      self:add(gainTransform)
-      self:add(biasTransform)
-   end
-end
diff --git a/contrib/lua-torch/nn/LeakyReLU.lua b/contrib/lua-torch/nn/LeakyReLU.lua
deleted file mode 100644
index 56b7f25423..0000000000
--- a/contrib/lua-torch/nn/LeakyReLU.lua
+++ /dev/null
@@ -1,41 +0,0 @@
-local LeakyReLU, parent = torch.class('nn.LeakyReLU','nn.Module')
-
-function LeakyReLU:__init(negval,ip)
-   parent.__init(self)
-   if type(negval) == 'boolean' then
-      -- a single boolean argument selects in-place mode
-      ip = negval
-      self.negval = 1/100
-   else
-      self.negval = negval or (1/100)
-   end
-   -- default for inplace is false
-   self.inplace = ip or false
-   if self.negval < 0 then
-      self.inplace = false
-   end
-end
-
-function LeakyReLU:updateOutput(input)
-   input.THNN.LeakyReLU_updateOutput(
-      input:cdata(),
-      self.output:cdata(),
-      self.negval,
-      self.inplace
-   )
-   return self.output
-end
-
-function LeakyReLU:updateGradInput(input, gradOutput)
-   input.THNN.LeakyReLU_updateGradInput(
-      input:cdata(),
-      gradOutput:cdata(),
-      self.gradInput:cdata(),
-      self.negval,
-      self.inplace
-   )
-   return self.gradInput
-end
-
-function LeakyReLU:__tostring__()
-   return torch.type(self) .. string.format('(%g)', self.negval)
-end
diff --git a/contrib/lua-torch/nn/Linear.lua b/contrib/lua-torch/nn/Linear.lua
deleted file mode 100644
index 09b5979ce1..0000000000
--- a/contrib/lua-torch/nn/Linear.lua
+++ /dev/null
@@ -1,122 +0,0 @@
-local Linear, parent = torch.class('nn.Linear', 'nn.Module')
-
-function Linear:__init(inputSize, outputSize, bias)
-   parent.__init(self)
-   local bias = ((bias == nil) and true) or bias
-   self.weight = torch.Tensor(outputSize, inputSize)
-   self.gradWeight = torch.Tensor(outputSize, inputSize)
-   if bias then
-      self.bias = torch.Tensor(outputSize)
-      self.gradBias = torch.Tensor(outputSize)
-   end
-   self:reset()
-end
-
-function Linear:noBias()
-   self.bias = nil
-   self.gradBias = nil
-   return self
-end
-
-function Linear:reset(stdv)
-   if stdv then
-      stdv = stdv * math.sqrt(3)
-   else
-      stdv = 1./math.sqrt(self.weight:size(2))
-   end
-   if nn.oldSeed then
-      for i=1,self.weight:size(1) do
-         self.weight:select(1, i):apply(function()
-            return torch.uniform(-stdv, stdv)
-         end)
-      end
-      if self.bias then
-         for i=1,self.bias:nElement() do
-            self.bias[i] = torch.uniform(-stdv, stdv)
-         end
-      end
-   else
-      self.weight:uniform(-stdv, stdv)
-      if self.bias then self.bias:uniform(-stdv, stdv) end
-   end
-   return self
-end
-
-function Linear:updateAddBuffer(input)
-   local nframe = input:size(1)
-   self.addBuffer = self.addBuffer or input.new()
-   if self.addBuffer:nElement() ~= nframe then
-      self.addBuffer:resize(nframe):fill(1)
-   end
-end
-
-function Linear:updateOutput(input)
-   if input:dim() == 1 then
-      self.output:resize(self.weight:size(1))
-      if self.bias then self.output:copy(self.bias) else self.output:zero() end
-      self.output:addmv(1, self.weight, input)
-   elseif input:dim() == 2 then
-      local nframe = input:size(1)
-      local nElement = self.output:nElement()
-      self.output:resize(nframe, self.weight:size(1))
-      if self.output:nElement() ~= nElement then
-         self.output:zero()
-      end
-      self:updateAddBuffer(input)
-      self.output:addmm(0, self.output, 1, input, self.weight:t())
-      if self.bias then self.output:addr(1, self.addBuffer, self.bias) end
-   else
-      error('input must be vector or matrix')
-   end
-
-   return self.output
-end
-
-function Linear:updateGradInput(input, gradOutput)
-   if self.gradInput then
-
-      local nElement = self.gradInput:nElement()
-      self.gradInput:resizeAs(input)
-      if self.gradInput:nElement() ~= nElement then
-         self.gradInput:zero()
-      end
-      if input:dim() ==
1 then - self.gradInput:addmv(0, 1, self.weight:t(), gradOutput) - elseif input:dim() == 2 then - self.gradInput:addmm(0, 1, gradOutput, self.weight) - end - - return self.gradInput - end -end - -function Linear:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - if input:dim() == 1 then - self.gradWeight:addr(scale, gradOutput, input) - if self.bias then self.gradBias:add(scale, gradOutput) end - elseif input:dim() == 2 then - self.gradWeight:addmm(scale, gradOutput:t(), input) - if self.bias then - -- update the size of addBuffer if the input is not the same size as the one we had in last updateGradInput - self:updateAddBuffer(input) - self.gradBias:addmv(scale, gradOutput:t(), self.addBuffer) - end - end -end - -function Linear:sharedAccUpdateGradParameters(input, gradOutput, lr) - -- we do not need to accumulate parameters when sharing: - self:defaultAccUpdateGradParameters(input, gradOutput, lr) -end - -function Linear:clearState() - if self.addBuffer then self.addBuffer:set() end - return parent.clearState(self) -end - -function Linear:__tostring__() - return torch.type(self) .. - string.format('(%d -> %d)', self.weight:size(2), self.weight:size(1)) .. - (self.bias == nil and ' without bias' or '') -end diff --git a/contrib/lua-torch/nn/LinearWeightNorm.lua b/contrib/lua-torch/nn/LinearWeightNorm.lua deleted file mode 100755 index a712f55359..0000000000 --- a/contrib/lua-torch/nn/LinearWeightNorm.lua +++ /dev/null @@ -1,168 +0,0 @@ -local LinearWeightNorm, parent = torch.class('nn.LinearWeightNorm', 'nn.Linear') - -function LinearWeightNorm:__init(inputSize, outputSize, bias, eps) - nn.Module.__init(self) -- Skip nn.Linear constructor - - local bias = ((bias == nil) and true) or bias - - self.eps = eps or 1e-16 - - self.outputSize = outputSize - self.inputSize = inputSize - - self.v = torch.Tensor(outputSize, inputSize) - self.gradV = torch.Tensor(outputSize, inputSize) - - self.weight = torch.Tensor(outputSize, inputSize) - - self.g = torch.Tensor(outputSize,1) - self.gradG = torch.Tensor(outputSize,1) - - self.norm = torch.Tensor(outputSize,1) - self.scale = torch.Tensor(outputSize,1) - - if bias then - self.bias = torch.Tensor(outputSize) - self.gradBias = torch.Tensor(outputSize) - end - - self:reset() -end - -function LinearWeightNorm:evaluate() - if self.train ~= false then - self:updateWeightMatrix() - end - - parent.evaluate(self) -end - -function LinearWeightNorm:initFromWeight(weight) - weight = weight or self.weight - - self.g:norm(weight,2,2):clamp(self.eps,math.huge) - self.v:copy(weight) - - return self -end - -function LinearWeightNorm.fromLinear(linear) - local module = nn.LinearWeightNorm(linear.weight:size(2), linear.weight:size(1), torch.isTensor(linear.bias)) - module.weight:copy(linear.weight) - module:initFromWeight() - - if linear.bias then - module.bias:copy(linear.bias) - end - - return module -end - -function LinearWeightNorm:toLinear() - self:updateWeightMatrix() - - local module = nn.Linear(self.inputSize, self.outputSize, torch.isTensor(self.bias)) - - module.weight:copy(self.weight) - if self.bias then - module.bias:copy(self.bias) - end - - return module -end - -function LinearWeightNorm:parameters() - if self.bias then - return {self.v, self.g, self.bias}, {self.gradV, self.gradG, self.gradBias} - else - return {self.v, self.g}, {self.gradV, self.gradG} - end -end - -function LinearWeightNorm:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1 / math.sqrt(self.inputSize) - end - - self.weight:uniform(-stdv,stdv) - 
self:initFromWeight() - - if self.bias then - self.bias:uniform(-stdv,stdv) - end -end - -function LinearWeightNorm:updateWeightMatrix() - if self.norm:dim() == 0 then self.norm:resizeAs(self.g) end - if self.scale:dim() == 0 then self.scale:resizeAs(self.g) end - if self.weight:dim() == 0 then self.weight:resizeAs(self.v) end - - self.norm:norm(self.v,2,2):clamp(self.eps,math.huge) - self.scale:cdiv(self.g,self.norm) - self.weight:cmul(self.v,self.scale:expandAs(self.v)) -end - -function LinearWeightNorm:updateOutput(input) - if self.train ~= false then - self:updateWeightMatrix() - end - - return parent.updateOutput(self, input) -end - -function LinearWeightNorm:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - if input:dim() == 1 then - self.gradV:addr(scale, gradOutput, input) - if self.bias then self.gradBias:add(scale, gradOutput) end - elseif input:dim() == 2 then - self.gradV:addmm(scale, gradOutput:t(), input) - if self.bias then - -- update the size of addBuffer if the input is not the same size as the one we had in last updateGradInput - self:updateAddBuffer(input) - self.gradBias:addmv(scale, gradOutput:t(), self.addBuffer) - end - end - - local scale = self.scale:expandAs(self.v) - local norm = self.norm:expandAs(self.v) - - self.weight:cmul(self.gradV,self.v):cdiv(norm) - self.gradG:sum(self.weight,2) - - self.gradV:cmul(scale) - - self.weight:cmul(self.v,scale):cdiv(norm) - self.weight:cmul(self.gradG:expandAs(self.weight)) - - self.gradV:add(-1,self.weight) -end - -function LinearWeightNorm:defaultAccUpdateGradParameters(input, gradOutput, lr) - local gradV = self.gradV - local gradG = self.gradG - local gradBias = self.gradBias - - self.gradV = self.v - self.gradG = self.g - self.gradBias = self.bias - - self:accGradParameters(input, gradOutput, -lr) - - self.gradV = gradV - self.gradG = gradG - self.gradBias = gradBias -end - -function LinearWeightNorm:clearState() - nn.utils.clear(self, 'weight', 'norm', 'scale') - return parent.clearState(self) -end - -function LinearWeightNorm:__tostring__() - return torch.type(self) .. - string.format('(%d -> %d)', self.inputSize, self.outputSize) .. 
- (self.bias == nil and ' without bias' or '') -end \ No newline at end of file diff --git a/contrib/lua-torch/nn/Log.lua b/contrib/lua-torch/nn/Log.lua deleted file mode 100644 index e8f236bfb7..0000000000 --- a/contrib/lua-torch/nn/Log.lua +++ /dev/null @@ -1,20 +0,0 @@ -local Log, parent = torch.class('nn.Log', 'nn.Module') - -function Log:__init() - parent.__init(self) -end - -function Log:updateOutput(input) - self.output:resizeAs(input) - self.output:copy(input) - self.output:log() - return self.output -end - -function Log:updateGradInput(input, gradOutput) - self.gradInput:resizeAs(input) - self.gradInput:fill(1) - self.gradInput:cdiv(input) - self.gradInput:cmul(gradOutput) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/LogSigmoid.lua b/contrib/lua-torch/nn/LogSigmoid.lua deleted file mode 100644 index cab848f4d7..0000000000 --- a/contrib/lua-torch/nn/LogSigmoid.lua +++ /dev/null @@ -1,27 +0,0 @@ -local LogSigmoid, parent = torch.class('nn.LogSigmoid', 'nn.Module') - -function LogSigmoid:updateOutput(input) - self.buffer = self.buffer or input.new() - input.THNN.LogSigmoid_updateOutput( - input:cdata(), - self.output:cdata(), - self.buffer:cdata() - ) - return self.output -end - -function LogSigmoid:updateGradInput(input, gradOutput) - input.THNN.LogSigmoid_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.buffer:cdata() - ) - return self.gradInput -end - -function LogSigmoid:clearState() - if self.buffer then self.buffer:set() end - return parent.clearState(self) -end - diff --git a/contrib/lua-torch/nn/LogSoftMax.lua b/contrib/lua-torch/nn/LogSoftMax.lua deleted file mode 100644 index 37c8acae45..0000000000 --- a/contrib/lua-torch/nn/LogSoftMax.lua +++ /dev/null @@ -1,19 +0,0 @@ -local LogSoftMax = torch.class('nn.LogSoftMax', 'nn.Module') - -function LogSoftMax:updateOutput(input) - input.THNN.LogSoftMax_updateOutput( - input:cdata(), - self.output:cdata() - ) - return self.output -end - -function LogSoftMax:updateGradInput(input, gradOutput) - input.THNN.LogSoftMax_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.output:cdata() - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/LookupTable.lua b/contrib/lua-torch/nn/LookupTable.lua deleted file mode 100644 index 6cffc6c3e4..0000000000 --- a/contrib/lua-torch/nn/LookupTable.lua +++ /dev/null @@ -1,166 +0,0 @@ -local THNN = require 'nn.THNN' -local LookupTable, parent = torch.class('nn.LookupTable', 'nn.Module') - -LookupTable.__version = 4 - -function LookupTable:__init(nIndex, nOutput, paddingValue, maxNorm, normType) - parent.__init(self) - - self.weight = torch.Tensor(nIndex, nOutput) - self.gradWeight = torch.Tensor(nIndex, nOutput):zero() - self.paddingValue = paddingValue or 0 - self.maxNorm = maxNorm or nil - self.normType = normType or nil - - self:reset() -end - -function LookupTable:backCompatibility() - self._count = self._count or torch.IntTensor() - self._input = self._input or torch.LongTensor() - - if not self.shouldScaleGradByFreq then - self.shouldScaleGradByFreq = false - end -end - -function LookupTable:accUpdateOnly() - self.gradWeight = nil - return self -end - -function LookupTable:setPadding(paddingValue) - self.paddingValue = paddingValue - return self -end - -function LookupTable:setMaxNorm(maxNorm) - self.maxNorm = maxNorm - return self -end - -function LookupTable:setNormType(normType) - self.normType = normType - return self -end - -function LookupTable:scaleGradByFreq() - 
self.shouldScaleGradByFreq = true
-   return self
-end
-
-function LookupTable:reset(stdv)
-   stdv = stdv or 1
-   self.weight:normal(0, stdv)
-end
-
-function LookupTable:makeInputContiguous(input)
-   -- make sure input is a contiguous torch.LongTensor
-   if (not input:isContiguous()) or torch.type(input) ~= torch.type(self._input) then
-      self.copiedInput = true
-      self._input:resize(input:size()):copy(input)
-      return self._input
-   end
-   self.copiedInput = false
-   return input
-end
-
-function LookupTable:updateOutput(input)
-   self:backCompatibility()
-   self:renorm(input)
-   input = self:makeInputContiguous(input)
-   if input:dim() == 1 then
-      self.output:index(self.weight, 1, input)
-   elseif input:dim() == 2 then
-      self.output:index(self.weight, 1, input:view(-1))
-      self.output = self.output:view(input:size(1), input:size(2), self.weight:size(2))
-   else
-      error("input must be a vector or matrix")
-   end
-   return self.output
-end
-
-function LookupTable:updateGradInput(input, gradOutput)
-   -- the input can be of any type (in the forward pass it is converted
-   -- to LongTensor anyway); thus we need to allocate new memory each
-   -- time the user changes the input type
-   if torch.type(self.gradInput) ~= torch.type(input) then
-      self.gradInput = input.new()
-   end
-   if not self.gradInput:isSameSizeAs(input) then
-      self.gradInput:resizeAs(input):zero()
-   end
-   return self.gradInput
-end
-
-function LookupTable:accGradParameters(input, gradOutput, scale)
-   self:backCompatibility()
-   input = self.copiedInput and self._input or input
-   if input:dim() == 2 then
-      input = input:view(-1)
-   elseif input:dim() ~= 1 then
-      error("input must be a vector or matrix")
-   end
-
-   self.gradWeight.THNN.LookupTable_accGradParameters(
-      input:cdata(),
-      gradOutput:cdata(),
-      self.gradWeight:cdata(),
-      self._count:cdata(),
-      THNN.optionalTensor(self._sorted),
-      THNN.optionalTensor(self._indices),
-      self.shouldScaleGradByFreq or false,
-      self.paddingValue or 0,
-      scale or 1
-   )
-end
-
-function LookupTable:renorm(input)
-   if not self.maxNorm then
-      return
-   end
-   -- copy input into _input, so _input is contiguous.
-   -- The copied _input will be modified in the C code.
-   self._input:resize(input:size()):copy(input)
-   local row_idx = self._input
-   if row_idx:dim() == 2 then
-      row_idx = row_idx:view(-1)
-   elseif row_idx:dim() ~= 1 then
-      error("input must be a vector or matrix")
-   end
-   -- "row_idx" and "weight" will be modified in the C code
-   self.weight.THNN.LookupTable_renorm(
-      row_idx:cdata(),
-      self.weight:cdata(),
-      self.maxNorm,
-      self.normType or 2
-   )
-end
-
-function LookupTable:type(type, tensorCache)
-   parent.type(self, type, tensorCache)
-
-   if type and type:find('torch%.Cuda.*Tensor') then
-      -- CUDA uses _sorted and _indices temporary tensors
-      self._sorted = torch.CudaLongTensor and torch.CudaLongTensor.new() or torch.CudaTensor.new()
-      self._indices = torch.CudaLongTensor and torch.CudaLongTensor.new() or torch.CudaTensor.new()
-      self._count = torch.CudaLongTensor and torch.CudaLongTensor.new() or torch.CudaTensor.new()
-      self._input = torch.CudaLongTensor and torch.CudaLongTensor.new() or torch.CudaTensor.new()
-   else
-      -- self._count and self._input should only be converted if using Cuda
-      self._count = torch.IntTensor()
-      self._input = torch.LongTensor()
-   end
-
-   return self
-end
-
-function LookupTable:clearState()
-   nn.utils.clear(self, '_count', '_input')
-   return parent.clearState(self)
-end
-
-function LookupTable:sharedAccUpdateGradParameters(input, gradOutput, lr)
-   -- we do not need to accumulate parameters when sharing:
-   self:defaultAccUpdateGradParameters(input, gradOutput, lr)
-end
diff --git a/contrib/lua-torch/nn/MM.lua b/contrib/lua-torch/nn/MM.lua
deleted file mode 100644
index cc978c8cba..0000000000
--- a/contrib/lua-torch/nn/MM.lua
+++ /dev/null
@@ -1,92 +0,0 @@
---[[ Module to perform matrix multiplication on two minibatch inputs,
-   producing a minibatch.
-]]
-
-local MM, parent = torch.class('nn.MM', 'nn.Module')
-
---[[ The constructor takes two optional arguments, specifying whether or not
-   to transpose any of the input matrices before performing the multiplication.
-]] -function MM:__init(transA, transB) - parent.__init(self) - - self.transA = transA or false - self.transB = transB or false - - self.gradInput = {torch.Tensor(), torch.Tensor()} -end - -function MM:updateOutput(input) - assert(#input == 2, 'input must be a pair of minibatch matrices') - local a, b = table.unpack(input) - assert(a:nDimension() == 2 or a:nDimension() == 3, 'input tensors must be 2D or 3D') - - if a:nDimension() == 2 then - assert(b:nDimension() == 2, 'second input tensor must be 2D') - - if self.transA then a = a:t() end - if self.transB then b = b:t() end - assert(a:size(2) == b:size(1), 'matrix sizes do not match') - - self.output:resize(a:size(1), b:size(2)) - self.output:mm(a, b) - else - assert(b:nDimension() == 3, 'second input tensor must be 3D') - assert(a:size(1) == b:size(1), 'inputs must contain the same number of minibatches') - - if self.transA then a = a:transpose(2, 3) end - if self.transB then b = b:transpose(2, 3) end - assert(a:size(3) == b:size(2), 'matrix sizes do not match') - - self.output:resize(a:size(1), a:size(2), b:size(3)) - self.output:bmm(a, b) - end - - return self.output -end - -function MM:updateGradInput(input, gradOutput) - self.gradInput[1] = self.gradInput[1] or input[1].new() - self.gradInput[2] = self.gradInput[2] or input[2].new() - - assert(#input == 2, 'input must be a pair of tensors') - local a, b = table.unpack(input) - self.gradInput[1]:resizeAs(a) - self.gradInput[2]:resizeAs(b) - - assert(gradOutput:nDimension() == 2 or gradOutput:nDimension() == 3, 'arguments must be a 2D or 3D Tensor') - - local h_dim, w_dim, f - if gradOutput:nDimension() == 2 then - assert(a:nDimension() == 2, 'first input tensor must be 2D') - assert(b:nDimension() == 2, 'second input tensor must be 2D') - - h_dim, w_dim = 1, 2 - f = "mm" - else - assert(a:nDimension() == 3, 'first input tensor must be 3D') - assert(b:nDimension() == 3, 'second input tensor must be 3D') - - h_dim, w_dim = 2, 3 - f = "bmm" - end - - if self.transA == self.transB then - a = a:transpose(h_dim, w_dim) - b = b:transpose(h_dim, w_dim) - end - - if self.transA then - self.gradInput[1][f](self.gradInput[1], b, gradOutput:transpose(h_dim, w_dim)) - else - self.gradInput[1][f](self.gradInput[1], gradOutput, b) - end - - if self.transB then - self.gradInput[2][f](self.gradInput[2], gradOutput:transpose(h_dim, w_dim), a) - else - self.gradInput[2][f](self.gradInput[2], a, gradOutput) - end - - return self.gradInput -end diff --git a/contrib/lua-torch/nn/MSECriterion.lua b/contrib/lua-torch/nn/MSECriterion.lua deleted file mode 100644 index d38beb6bf9..0000000000 --- a/contrib/lua-torch/nn/MSECriterion.lua +++ /dev/null @@ -1,32 +0,0 @@ -local MSECriterion, parent = torch.class('nn.MSECriterion', 'nn.Criterion') - -function MSECriterion:__init(sizeAverage) - parent.__init(self) - if sizeAverage ~= nil then - self.sizeAverage = sizeAverage - else - self.sizeAverage = true - end -end - -function MSECriterion:updateOutput(input, target) - self.output_tensor = self.output_tensor or input.new(1) - input.THNN.MSECriterion_updateOutput( - input:cdata(), - target:cdata(), - self.output_tensor:cdata(), - self.sizeAverage - ) - self.output = self.output_tensor[1] - return self.output -end - -function MSECriterion:updateGradInput(input, target) - input.THNN.MSECriterion_updateGradInput( - input:cdata(), - target:cdata(), - self.gradInput:cdata(), - self.sizeAverage - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/MV.lua b/contrib/lua-torch/nn/MV.lua deleted file mode 100644 
index a00478ef6d..0000000000
--- a/contrib/lua-torch/nn/MV.lua
+++ /dev/null
@@ -1,82 +0,0 @@
---[[ Module to perform matrix vector multiplication on two minibatch inputs,
-producing a minibatch.
-]]
-
-local MV, parent = torch.class('nn.MV', 'nn.Module')
-
--- Backward compatibility
-local unpack = unpack or table.unpack
-
-function MV:__init(trans)
-   parent.__init(self)
-
-   self.trans = trans or false
-   assert(type(self.trans) == 'boolean', "argument must be a boolean, matrix transpose before multiplication")
-
-   self.gradInput = {torch.Tensor(), torch.Tensor()}
-end
-
-function MV:updateOutput(input)
-   assert(#input == 2, 'input must be a pair of minibatch matrices')
-   local M, v = unpack(input)
-   assert(M:nDimension() == 2 or M:nDimension() == 3, 'input matrix must be 2D or 3D')
-   assert(v:nDimension() == 1 or v:nDimension() == 2, 'input vector must be 1D or 2D')
-
-   if M:nDimension() == 2 then
-      assert(v:nDimension() == 1, 'vector must be 1D')
-
-      if self.trans then M = M:transpose(1,2) end
-      assert(M:size(2) == v:size(1), 'matrix row count and vector length do not match')
-
-      self.output:resize(M:size(1))
-      self.output:mv(M, v)
-   else
-      assert(v:nDimension() == 2, 'vector must be 2D (batch dimension)')
-      assert(M:size(1) == v:size(1), 'inputs must contain the same number of minibatches')
-
-      if self.trans then M = M:transpose(2,3) end
-      assert(M:size(3) == v:size(2), 'matrix row count and vector length do not match')
-
-      self.output:resize(M:size(1), M:size(2), 1)
-      self.output:bmm(M, v:view(v:size(1), v:size(2), 1)):resize(M:size(1), M:size(2))
-   end
-
-   return self.output
-end
-
-function MV:updateGradInput(input, gradOutput)
-   assert(#input == 2, 'input must be a pair of tensors')
-   local M, v = unpack(input)
-   self.gradInput[1]:resizeAs(M)
-   self.gradInput[2]:resizeAs(v)
-
-   assert(gradOutput:nDimension() == 1 or gradOutput:nDimension() == 2, 'arguments must be a 1D or 2D Tensor')
-
-   if gradOutput:nDimension() == 2 then
-      assert(M:nDimension() == 3, 'matrix must be 3D (batched)')
-      assert(v:nDimension() == 2, 'vector must be 2D (batched)')
-      local bdim = M:size(1)
-      local odim = M:size(2)
-      local idim = M:size(3)
-
-      if self.trans then
-         self.gradInput[1]:bmm(v:view(bdim, odim, 1), gradOutput:view(bdim, 1, idim))
-         self.gradInput[2]:view(bdim, odim, 1):bmm(M, gradOutput:view(bdim, idim, 1))
-      else
-         self.gradInput[1]:bmm(gradOutput:view(bdim, odim, 1), v:view(bdim, 1, idim))
-         self.gradInput[2]:view(bdim, idim, 1):bmm(M:transpose(2,3), gradOutput:view(bdim, odim, 1))
-      end
-   else
-      assert(M:nDimension() == 2, 'matrix must be 2D')
-      assert(v:nDimension() == 1, 'vector must be 1D')
-
-      if self.trans then
-         self.gradInput[1]:ger(v, gradOutput)
-         self.gradInput[2] = M * gradOutput
-      else
-         self.gradInput[1]:ger(gradOutput, v)
-         self.gradInput[2] = M:t() * gradOutput
-      end
-   end
-   return self.gradInput
-end
diff --git a/contrib/lua-torch/nn/MapTable.lua b/contrib/lua-torch/nn/MapTable.lua
deleted file mode 100644
index c79f1ea1d8..0000000000
--- a/contrib/lua-torch/nn/MapTable.lua
+++ /dev/null
@@ -1,119 +0,0 @@
-local MapTable, parent = torch.class('nn.MapTable', 'nn.Container')
-
-function MapTable:__init(module, shared)
-   parent.__init(self)
-   self.shared = (shared == nil) and true or shared
-   self.sharedparams = {'weight', 'bias', 'gradWeight', 'gradBias'}
-   self.output = {}
-   self.gradInput = {}
-   self:add(module)
-end
-
-function MapTable:_extend(n)
-   self.sharedparams = self.sharedparams or {'weight', 'bias', 'gradWeight', 'gradBias'}
-   self.modules[1] = self.module
-   for
i = 2, n do - if not self.modules[i] then - if self.shared then - self.modules[i] = self.module:clone(table.unpack(self.sharedparams)) - else - self.modules[i] = self.module:clone() - end - end - end -end - -function MapTable:resize(n) - self:_extend(n) - for i = n + 1, #self.modules do - -- It's not clear why this clearState call is necessary, but it fixes - -- https://github.com/torch/nn/issues/1141 . - self.modules[i]:clearState() - self.modules[i] = nil - end -end - -function MapTable:add(module) - assert(not self.module, 'Single module required') - self.module = module - self.modules[1] = self.module - return self -end - -function MapTable:updateOutput(input) - self.output = {} - self:_extend(#input) - for i = 1, #input do - self.output[i] = self:rethrowErrors(self.modules[i], i, 'updateOutput', input[i]) - end - return self.output -end - -function MapTable:updateGradInput(input, gradOutput) - self.gradInput = {} - self:_extend(#input) - for i = 1, #input do - self.gradInput[i] = self:rethrowErrors(self.modules[i], i, 'updateGradInput', input[i], gradOutput[i]) - end - return self.gradInput -end - -function MapTable:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - self:_extend(#input) - for i = 1, #input do - self:rethrowErrors(self.modules[i], i, 'accGradParameters', input[i], gradOutput[i], scale) - end -end - -function MapTable:accUpdateGradParameters(input, gradOutput, lr) - lr = lr or 1 - self:_extend(#input) - for i = 1, #input do - self:rethrowErrors(self.modules[i], i, 'accUpdateGradParameters', input[i], gradOutput[i], lr) - end -end - -function MapTable:zeroGradParameters() - if self.module then - if self.shared then - self.module:zeroGradParameters() - else - parent.zeroGradParameters(self) - end - end -end - -function MapTable:updateParameters(learningRate) - if self.module then - if self.shared then - self.module:updateParameters(learningRate) - else - parent.updateParameters(self, learningRate) - end - end -end - -function MapTable:clearState() - for i = 2, #self.modules do - -- It's not clear why this clearState call is necessary, but it fixes - -- https://github.com/torch/nn/issues/1141 . - self.modules[i]:clearState() - self.modules[i] = nil - end - parent.clearState(self) -end - -function MapTable:__tostring__() - local tab = ' ' - local line = '\n' - local extlast = ' ' - local str = torch.type(self) - if self.module then - str = str .. ' {' .. line .. tab - str = str .. tostring(self.module):gsub(line, line .. tab .. extlast) .. line .. '}' - else - str = str .. 
' { }' - end - return str -end diff --git a/contrib/lua-torch/nn/MarginCriterion.lua b/contrib/lua-torch/nn/MarginCriterion.lua deleted file mode 100644 index 1ab8ad7848..0000000000 --- a/contrib/lua-torch/nn/MarginCriterion.lua +++ /dev/null @@ -1,31 +0,0 @@ -local MarginCriterion, parent = torch.class('nn.MarginCriterion', 'nn.Criterion') - -function MarginCriterion:__init(margin) - parent.__init(self) - self.sizeAverage = true - self.margin = margin or 1 -end - -function MarginCriterion:updateOutput(input, target) - self.output_tensor = self.output_tensor or input.new(1) - input.THNN.MarginCriterion_updateOutput( - input:cdata(), - target:cdata(), - self.output_tensor:cdata(), - self.sizeAverage, - self.margin - ) - self.output = self.output_tensor[1] - return self.output -end - -function MarginCriterion:updateGradInput(input, target) - input.THNN.MarginCriterion_updateGradInput( - input:cdata(), - target:cdata(), - self.gradInput:cdata(), - self.sizeAverage, - self.margin - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/MarginRankingCriterion.lua b/contrib/lua-torch/nn/MarginRankingCriterion.lua deleted file mode 100644 index 844d905d51..0000000000 --- a/contrib/lua-torch/nn/MarginRankingCriterion.lua +++ /dev/null @@ -1,75 +0,0 @@ -local MarginRankingCriterion, parent = torch.class('nn.MarginRankingCriterion', 'nn.Criterion') - -function MarginRankingCriterion:__init(margin) - parent.__init(self) - margin=margin or 1 - self.margin = margin - self.gradInput = {torch.Tensor(1), torch.Tensor(1)} - self.sizeAverage = true -end - -function MarginRankingCriterion:updateOutput(input, y) - if torch.type(y) == 'number' then -- non-batch mode - self.output = math.max(0, -y * (input[1][1] - input[2][1]) + self.margin) - else - self._output = self._output or input[1]:clone() - self._output:resizeAs(input[1]) - self._output:copy(input[1]) - - self._output:add(-1, input[2]) - self._output:mul(-1):cmul(y) - self._output:add(self.margin) - - self._output:cmax(0) - - self.output = self._output:sum() - - if self.sizeAverage then - self.output = self.output/y:size(1) - end - end - - return self.output -end - -function MarginRankingCriterion:updateGradInput(input, y) - if torch.type(y) == 'number' then -- non-batch mode - local dist = -y * (input[1][1] - input[2][1]) + self.margin - if dist < 0 then - self.gradInput[1][1] = 0; - self.gradInput[2][1] = 0; - else - self.gradInput[1][1] = -y - self.gradInput[2][1] = y - end - else - self.dist = self.dist or input[1].new() - self.dist = self.dist:resizeAs(input[1]):copy(input[1]) - local dist = self.dist - - dist:add(-1, input[2]) - dist:mul(-1):cmul(y) - dist:add(self.margin) - - self.mask = self.mask or input[1].new() - self.mask = self.mask:resizeAs(input[1]):copy(dist) - local mask = self.mask - - mask:ge(dist, 0) - - self.gradInput[1]:resize(dist:size()) - self.gradInput[2]:resize(dist:size()) - - self.gradInput[1]:copy(mask) - self.gradInput[1]:mul(-1):cmul(y) - self.gradInput[2]:copy(mask) - self.gradInput[2]:cmul(y) - - if self.sizeAverage then - self.gradInput[1]:div(y:size(1)) - self.gradInput[2]:div(y:size(1)) - end - - end - return self.gradInput -end diff --git a/contrib/lua-torch/nn/MaskedSelect.lua b/contrib/lua-torch/nn/MaskedSelect.lua deleted file mode 100644 index c3f7834e17..0000000000 --- a/contrib/lua-torch/nn/MaskedSelect.lua +++ /dev/null @@ -1,71 +0,0 @@ -local unpack = unpack or table.unpack - -local MaskedSelect, parent = torch.class('nn.MaskedSelect', 'nn.Module') - ---[[ Sets the provided mask value for the 
module. ]] -function MaskedSelect:__init() - parent.__init(self) - self._maskIndices = torch.LongTensor() - self._maskIndexBuffer = torch.LongTensor() - self._maskIndexBufferCPU = torch.FloatTensor() - self._gradBuffer = torch.Tensor() - self._gradMask = torch.ByteTensor() -end - ---[[ Performs maskedSelect operation. ]] -function MaskedSelect:updateOutput(input) - local input, mask = unpack(input) - self.output:maskedSelect(input, mask) - return self.output -end - ---[[ Reverse maps unmasked gradOutput back to gradInput. ]] -function MaskedSelect:updateGradInput(input, gradOutput) - local input, mask = unpack(input) - if input:type() == 'torch.CudaTensor' then - self._maskIndexBufferCPU:range(1, mask:nElement()):resize(mask:size()) - self._maskIndexBuffer:resize( - self._maskIndexBufferCPU:size()):copy(self._maskIndexBufferCPU) - else - self._maskIndexBuffer:range(1, mask:nElement()):resize(mask:size()) - end - self._maskIndices:maskedSelect(self._maskIndexBuffer, mask) - self._gradBuffer:resize(input:nElement()):zero() - self._gradBuffer:scatter(1, self._maskIndices, gradOutput) - self._gradBuffer:resize(input:size()) - self.gradInput = {self._gradBuffer, - self._gradMask:resize(mask:size()):fill(0)} - return self.gradInput -end - -function MaskedSelect:type(type, tensorCache) - if not type then - return self._type - end - self._gradBuffer = self._gradBuffer:type(type) - self.gradInput = self.gradInput:type(type) - self.output = self.output:type(type) - - -- These casts apply when switching between cuda/non-cuda types - if type ~= 'torch.CudaTensor' then - self._maskIndexBuffer = self._maskIndexBuffer:long() - self._maskIndices = self._maskIndices:long() - self._gradMask = self._gradMask:byte() - elseif type == 'torch.CudaTensor' then - self._maskIndexBuffer = self._maskIndexBuffer:cuda() - self._maskIndices = self._maskIndices:cuda() - self._gradMask = self._gradMask:cuda() - end - self._type = type - return self -end - -function MaskedSelect:clearState() - return nn.utils.clear(self, {'output', - 'gradInput', - '_maskIndexBuffer', - '_maskIndexBufferCPU', - '_maskIndices', - '_gradBuffer', - '_gradMask'}) -end diff --git a/contrib/lua-torch/nn/Max.lua b/contrib/lua-torch/nn/Max.lua deleted file mode 100644 index 8273e808cb..0000000000 --- a/contrib/lua-torch/nn/Max.lua +++ /dev/null @@ -1,66 +0,0 @@ -local Max, parent = torch.class('nn.Max', 'nn.Module') - -function Max:__init(dimension, nInputDims) - parent.__init(self) - dimension = dimension or 1 - self.dimension = dimension - -- do not assign default value to nInputDims or it will break backward compatibility - self.nInputDims = nInputDims -end - -function Max:_getPositiveDimension(input) - local dimension = self.dimension - if dimension < 0 then - dimension = input:dim() + dimension + 1 - elseif self.nInputDims and input:dim()==(self.nInputDims+1) then - dimension = dimension + 1 - end - return dimension -end - -function Max:_lazyInit() - self._output = self._output or self.output.new() - if not self._indices then - if torch.typename(self.output):find('torch%.Cuda.*Tensor') then - self._indices = torch.CudaLongTensor and torch.CudaLongTensor() or torch.CudaTensor() - else - self._indices = torch.LongTensor() - end - end -end - -function Max:updateOutput(input) - self:_lazyInit() - local dimension = self:_getPositiveDimension(input) - torch.max(self._output, self._indices, input, dimension) - if input:dim() > 1 then - self.output:set(self._output:select(dimension, 1)) - else - self.output:set(self._output) - end - return 
self.output -end - -function Max:updateGradInput(input, gradOutput) - self:_lazyInit() - local dimension = self:_getPositiveDimension(input) - local gradOutputView - if input:dim() > 1 then - gradOutputView = nn.utils.addSingletonDimension(gradOutput, dimension) - else - gradOutputView = gradOutput - end - self.gradInput:resizeAs(input):zero():scatter(dimension, self._indices, gradOutputView) - return self.gradInput -end - -function Max:type(type, tensorCache) - self._indices = nil - parent.type(self, type, tensorCache) - return self -end - -function Max:clearState() - nn.utils.clear(self, '_indices', '_output') - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/Maxout.lua b/contrib/lua-torch/nn/Maxout.lua deleted file mode 100644 index a797a9f438..0000000000 --- a/contrib/lua-torch/nn/Maxout.lua +++ /dev/null @@ -1,13 +0,0 @@ --- Reference: http://jmlr.org/proceedings/papers/v28/goodfellow13.pdf - -local Maxout, parent = torch.class('nn.Maxout', 'nn.Sequential') - -function Maxout:__init(inputSize, outputSize, maxoutNumber, preprocess) - parent.__init(self) - self:add(nn.Linear(inputSize, outputSize * maxoutNumber)) - self:add(nn.View(maxoutNumber, outputSize):setNumInputDims(1)) - if preprocess then - self:add(preprocess) - end - self:add(nn.Max(1, 2)) -end diff --git a/contrib/lua-torch/nn/Mean.lua b/contrib/lua-torch/nn/Mean.lua deleted file mode 100644 index 8087ac95e7..0000000000 --- a/contrib/lua-torch/nn/Mean.lua +++ /dev/null @@ -1,14 +0,0 @@ -local Mean, parent = torch.class('nn.Mean', 'nn.Sum') - ---[[ - -This file is still here because of backward compatibility. - -Please use instead "nn.Sum(dimension, nInputDims, sizeAverage)" - -]]-- - - -function Mean:__init(dimension, nInputDims) - parent.__init(self, dimension, nInputDims, true) -end diff --git a/contrib/lua-torch/nn/Min.lua b/contrib/lua-torch/nn/Min.lua deleted file mode 100644 index 3a3e4a8021..0000000000 --- a/contrib/lua-torch/nn/Min.lua +++ /dev/null @@ -1,66 +0,0 @@ -local Min, parent = torch.class('nn.Min', 'nn.Module') - -function Min:__init(dimension, nInputDims) - parent.__init(self) - dimension = dimension or 1 - self.dimension = dimension - -- do not assign default value to nInputDims or it will break backward compatibility - self.nInputDims = nInputDims -end - -function Min:_getPositiveDimension(input) - local dimension = self.dimension - if dimension < 0 then - dimension = input:dim() + dimension + 1 - elseif self.nInputDims and input:dim()==(self.nInputDims+1) then - dimension = dimension + 1 - end - return dimension -end - -function Min:_lazyInit() - self._output = self._output or self.output.new() - if not self._indices then - if torch.typename(self.output):find('torch%.Cuda.*Tensor') then - self._indices = torch.CudaLongTensor and torch.CudaLongTensor() or torch.CudaTensor() - else - self._indices = torch.LongTensor() - end - end -end - -function Min:updateOutput(input) - self:_lazyInit() - local dimension = self:_getPositiveDimension(input) - torch.min(self._output, self._indices, input, dimension) - if input:dim() > 1 then - self.output:set(self._output:select(dimension, 1)) - else - self.output:set(self._output) - end - return self.output -end - -function Min:updateGradInput(input, gradOutput) - self:_lazyInit() - local dimension = self:_getPositiveDimension(input) - local gradOutputView - if input:dim() > 1 then - gradOutputView = nn.utils.addSingletonDimension(gradOutput, dimension) - else - gradOutputView = gradOutput - end - 
self.gradInput:resizeAs(input):zero():scatter(dimension, self._indices, gradOutputView) - return self.gradInput -end - -function Min:type(type, tensorCache) - self._indices = nil - parent.type(self, type, tensorCache) - return self -end - -function Min:clearState() - nn.utils.clear(self, '_indices', '_output') - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/MixtureTable.lua b/contrib/lua-torch/nn/MixtureTable.lua deleted file mode 100644 index dbe19742f0..0000000000 --- a/contrib/lua-torch/nn/MixtureTable.lua +++ /dev/null @@ -1,165 +0,0 @@ -local MixtureTable, parent = torch.class('nn.MixtureTable', 'nn.Module') - -function MixtureTable:__init(dim) - parent.__init(self) - self.dim = dim - self.size = torch.LongStorage() - self.batchSize = 0 - self.size2 = torch.LongStorage() - self.backwardSetup = false - self.gradInput = {} -end - -function MixtureTable:updateOutput(input) - local gaterInput, expertInputs = table.unpack(input) - - -- buffers - self._gaterView = self._gaterView or input[1].new() - self._expert = self._expert or input[1].new() - self._expertView = self._expertView or input[1].new() - - self.dimG = 2 - local batchSize = gaterInput:size(1) - if gaterInput:dim() < 2 then - self.dimG = 1 - self.dim = self.dim or 1 - batchSize = 1 - end - self.dim = self.dim or 2 - - if self.table or torch.type(expertInputs) == 'table' then - -- expertInputs is a Table : - self.table = true - if gaterInput:size(self.dimG) ~= #expertInputs then - error"Should be one gater output per expert" - end - local expertInput = expertInputs[1] - self.size:resize(expertInput:dim()+1):fill(1) - if self.dimG > 1 then - self.size[1] = gaterInput:size(1) - end - self.size[self.dim] = gaterInput:size(self.dimG) - self.output:resizeAs(expertInput) - self.batchSize = batchSize - self._gaterView:view(gaterInput, self.size) - self.output:zero() - -- multiply accumulate gater outputs by their commensurate expert - for i,expertInput in ipairs(expertInputs) do - local gate = self._gaterView:select(self.dim,i):expandAs(expertInput) - self.output:addcmul(expertInput, gate) - end - else - -- expertInputs is a Tensor : - self.size:resize(expertInputs:dim()):fill(1) - if self.dimG > 1 then - self.size[1] = gaterInput:size(1) - end - self.size[self.dim] = gaterInput:size(self.dimG) - self.output:resizeAs(expertInputs:select(self.dim, 1)) - self.batchSize = batchSize - self._gaterView:view(gaterInput, self.size) - self._expert:cmul(self._gaterView:expandAs(expertInputs), expertInputs) - self.output:sum(self._expert, self.dim) - self.output:resizeAs(expertInputs:select(self.dim, 1)) - end - - return self.output -end - -function MixtureTable:updateGradInput(input, gradOutput) - local gaterInput, expertInputs = table.unpack(input) - nn.utils.recursiveResizeAs(self.gradInput, input) - local gaterGradInput, expertGradInputs = table.unpack(self.gradInput) - - -- buffers - self._sum = self._sum or input[1].new() - self._expertView2 = self._expertView2 or input[1].new() - self._expert2 = self._expert2 or input[1].new() - - if self.table then - for i,expertInput in ipairs(expertInputs) do - local expertGradInput = expertGradInputs[i] or expertInput:clone() - expertGradInput:resizeAs(expertInput) - expertGradInputs[i] = expertGradInput - end - gaterGradInput:resizeAs(gaterInput) - - -- Clear invalid gradients - if #expertGradInputs > #expertInputs then - for i=#expertInputs+1, #expertGradInputs do - expertGradInputs[i] = nil - end - end - - -- like CMulTable, but with broadcasting - for i,expertGradInput in 
ipairs(expertGradInputs) do - -- gater updateGradInput - self._expert:cmul(gradOutput, expertInputs[i]) - if self.dimG == 1 then - self._expertView:view(self._expert, -1) - else - self._expertView:view(self._expert, gradOutput:size(1), -1) - end - self._sum:sum(self._expertView, self.dimG) - if self.dimG == 1 then - gaterGradInput[i] = self._sum:select(self.dimG,1) - else - gaterGradInput:select(self.dimG,i):copy(self._sum:select(self.dimG,1)) - end - - -- expert updateGradInput - local gate = self._gaterView:select(self.dim,i):expandAs(expertGradInput) - expertGradInput:cmul(gate, gradOutput) - end - else - self.size2:resize(expertInputs:dim()) - self.size2:copy(expertInputs:size()) - self.size2[self.dim] = 1 - gaterGradInput:resizeAs(gaterInput) - - -- gater updateGradInput - self._expertView:view(gradOutput, self.size2) - local gradOutput = self._expertView:expandAs(expertInputs) - self._expert:cmul(gradOutput, expertInputs) - local expert = self._expert:transpose(self.dim, self.dimG) - if not expert:isContiguous() then - self._expert2:resizeAs(expert) - self._expert2:copy(expert) - expert = self._expert2 - end - if self.dimG == 1 then - self._expertView2:view(expert, gaterInput:size(1), -1) - else - self._expertView2:view(expert, gaterInput:size(1), gaterInput:size(2), -1) - end - gaterGradInput:sum(self._expertView2, self.dimG+1) - gaterGradInput:resizeAs(gaterInput) - - -- expert updateGradInput - expertGradInputs:cmul(self._gaterView:expandAs(expertInputs), gradOutput) - end - - return self.gradInput -end - -function MixtureTable:type(type, tensorCache) - self._gaterView = nil - self._expert = nil - self._expertView = nil - self._sum = nil - self._expert2 = nil - self._expertView2 = nil - return parent.type(self, type, tensorCache) -end - -function MixtureTable:clearState() - nn.utils.clear(self, { - '_gaterView', - '_expert', - '_expertView', - '_sum', - '_expert2', - '_expertView2', - }) - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/Module.lua b/contrib/lua-torch/nn/Module.lua deleted file mode 100644 index 3debc57892..0000000000 --- a/contrib/lua-torch/nn/Module.lua +++ /dev/null @@ -1,429 +0,0 @@ -local Module = torch.class('nn.Module') - -function Module:__init() - self.gradInput = torch.Tensor() - self.output = torch.Tensor() - self._type = self.output:type() -end - -function Module:parameters() - if self.weight and self.bias then - return {self.weight, self.bias}, {self.gradWeight, self.gradBias} - elseif self.weight then - return {self.weight}, {self.gradWeight} - elseif self.bias then - return {self.bias}, {self.gradBias} - else - return - end -end - -function Module:updateOutput(input) - return self.output -end - -function Module:forward(input) - return self:updateOutput(input) -end - -function Module:backward(input, gradOutput, scale) - scale = scale or 1 - self:updateGradInput(input, gradOutput) - self:accGradParameters(input, gradOutput, scale) - return self.gradInput -end - -function Module:backwardUpdate(input, gradOutput, lr) - self:updateGradInput(input, gradOutput) - self:accUpdateGradParameters(input, gradOutput, lr) - return self.gradInput -end - -function Module:updateGradInput(input, gradOutput) - return self.gradInput -end - -function Module:accGradParameters(input, gradOutput, scale) -end - -function Module:accUpdateGradParameters(input, gradOutput, lr) - if self.shared then - self:sharedAccUpdateGradParameters(input, gradOutput, lr) - else - self:defaultAccUpdateGradParameters(input, gradOutput, lr) - end -end - -function 
Module:defaultAccUpdateGradParameters(input, gradOutput, lr) - local gradWeight = self.gradWeight - local gradBias = self.gradBias - self.gradWeight = self.weight - self.gradBias = self.bias - self:accGradParameters(input, gradOutput, -lr) - self.gradWeight = gradWeight - self.gradBias = gradBias -end - -function Module:sharedAccUpdateGradParameters(input, gradOutput, lr) - if self:parameters() then - self:zeroGradParameters() - self:accGradParameters(input, gradOutput, 1) - self:updateParameters(lr) - end -end - -function Module:zeroGradParameters() - local _,gradParams = self:parameters() - if gradParams then - for i=1,#gradParams do - gradParams[i]:zero() - end - end -end - -function Module:updateParameters(learningRate) - local params, gradParams = self:parameters() - if params then - for i=1,#params do - params[i]:add(-learningRate, gradParams[i]) - end - end -end - -function Module:training() - self.train = true -end - -function Module:evaluate() - self.train = false -end - -function Module:share(mlp, ...) - local arg = {...} - for i,v in ipairs(arg) do - if self[v] ~= nil then - self[v]:set(mlp[v]) - self.shared = true - mlp.shared = true - end - end - return self -end - -local function sharedWrite(...) - local arg = {...} - local shared = {} - for i,v in ipairs(arg) do - shared[v] = true - end - return function(self, file) - local object = {} - for k, v in pairs(self) do - if shared[k] then - assert(torch.isTensor(v), 'Shared parameters have to be Tensors') - object[k] = v.new() - else - object[k] = v - end - end - file:writeObject(object) - end -end - -function Module:clone(...) - local oldWrite = nn.Module.write - nn.Module.write = sharedWrite(...) - - local f = torch.MemoryFile("rw"):binary() - f:writeObject(self) - f:seek(1) - local clone = f:readObject() - f:close() - - nn.Module.write = oldWrite - - if select('#',...) > 0 then - clone:share(self,...) - end - return clone -end - -function Module:type(type, tensorCache) - if not type then - return self._type - end - - tensorCache = tensorCache or {} - - -- find all tensors and convert them - for key,param in pairs(self) do - self[key] = nn.utils.recursiveType(param, type, tensorCache) - end - - self._type = type - return self -end - -function Module:float(...) - return self:type('torch.FloatTensor',...) -end - -function Module:double(...) - return self:type('torch.DoubleTensor',...) -end - -function Module:cuda(...) - return self:type('torch.CudaTensor',...) -end - -function Module:reset() -end - -function Module:write(file) - -- Write all values in the object as a table. - local object = {} - for k, v in pairs(self) do - object[k] = v - end - file:writeObject(object) -end - -function Module:read(file) - local object = file:readObject() - for k, v in pairs(object) do - self[k] = v - end -end - --- This function is not easy to understand. It works as follows: --- --- - gather all parameter tensors for this module (and children); --- count all parameter values (floats) --- - create one ginormous memory area (Storage object) with room for all --- parameters --- - remap each parameter tensor to point to an area within the ginormous --- Storage, and copy it there --- --- It has the effect of making all parameters point to the same memory area, --- which is then returned. --- --- The purpose is to allow operations over all parameters (such as momentum --- updates and serialization), but it assumes that all parameters are of --- the same type (and, in the case of CUDA, on the same device), which --- is not always true. 
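--[[ A minimal sketch of what the flattening enables, using a hypothetical
two-layer model that is not part of this diff: once getParameters() has
remapped every weight and gradient into two flat tensors, a whole-network
SGD step is a single vector operation. ]]
local mlp = nn.Sequential():add(nn.Linear(10, 5)):add(nn.Tanh()) -- hypothetical model
local params, gradParams = mlp:getParameters() -- views into one flat storage each
local input = torch.randn(10)
mlp:zeroGradParameters()
mlp:forward(input)
mlp:backward(input, torch.randn(5)) -- random stand-in for a loss gradient
params:add(-0.01, gradParams) -- one call updates every parameter in the net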
Use for_each() to iterate over this module and --- children instead. --- --- Module._flattenTensorBuffer can be used by other packages (e.g. cunn) --- to specify the type of temporary buffers. For example, the temporary --- buffers for CudaTensor could be FloatTensor, to avoid GPU memory usage. --- --- TODO: This logically belongs to torch.Tensor, not nn. -Module._flattenTensorBuffer = {} -function Module.flatten(parameters) - - -- returns true if tensor occupies a contiguous region of memory (no holes) - local function isCompact(tensor) - local sortedStride, perm = torch.sort( - torch.LongTensor(tensor:nDimension()):set(tensor:stride()), 1, true) - local sortedSize = torch.LongTensor(tensor:nDimension()):set( - tensor:size()):index(1, perm) - local nRealDim = torch.clamp(sortedStride, 0, 1):sum() - sortedStride = sortedStride:narrow(1, 1, nRealDim):clone() - sortedSize = sortedSize:narrow(1, 1, nRealDim):clone() - local t = tensor.new():set(tensor:storage(), 1, - sortedSize:storage(), - sortedStride:storage()) - return t:isContiguous() - end - - if not parameters or #parameters == 0 then - return torch.Tensor() - end - local Tensor = parameters[1].new - local TmpTensor = Module._flattenTensorBuffer[torch.type(parameters[1])] or Tensor - - -- 1. construct the set of all unique storages referenced by parameter tensors - local storages = {} - local nParameters = 0 - local parameterMeta = {} - for k = 1,#parameters do - local param = parameters[k] - local storage = parameters[k]:storage() - local storageKey = torch.pointer(storage) - - if not storages[storageKey] then - storages[storageKey] = {storage, nParameters} - nParameters = nParameters + storage:size() - end - - parameterMeta[k] = {storageOffset = param:storageOffset() + - storages[storageKey][2], - size = param:size(), - stride = param:stride()} - end - - -- 2. construct a single tensor that will hold all the parameters - local flatParameters = TmpTensor(nParameters):zero() - - -- 3. determine if there are elements in the storage that none of the - -- parameter tensors reference ('holes') - local tensorsCompact = true - for k = 1,#parameters do - local meta = parameterMeta[k] - local tmp = TmpTensor():set( - flatParameters:storage(), meta.storageOffset, meta.size, meta.stride) - tmp:fill(1) - tensorsCompact = tensorsCompact and isCompact(tmp) - end - - local maskParameters = flatParameters:byte():clone() - local compactOffsets = flatParameters:long():cumsum(1) - local nUsedParameters = compactOffsets[-1] - - -- 4. copy storages into the flattened parameter tensor - for _, storageAndOffset in pairs(storages) do - local storage, offset = table.unpack(storageAndOffset) - flatParameters[{{offset+1,offset+storage:size()}}]:copy(Tensor():set(storage)) - end - - -- 5. allow garbage collection - storages = nil - for k = 1,#parameters do - parameters[k]:set(Tensor()) - end - - -- 6. compact the flattened parameters if there were holes - if nUsedParameters ~= nParameters then - assert(tensorsCompact, - "Cannot gather tensors that are not compact") - - flatParameters = TmpTensor(nUsedParameters):copy( - flatParameters:maskedSelect(maskParameters)) - for k = 1,#parameters do - parameterMeta[k].storageOffset = - compactOffsets[parameterMeta[k].storageOffset] - end - end - - if TmpTensor ~= Tensor then - flatParameters = Tensor(flatParameters:nElement()):copy(flatParameters) - end - - -- 7. 
fix up the parameter tensors to point at the flattened parameters - for k = 1,#parameters do - parameters[k]:set(flatParameters:storage(), - parameterMeta[k].storageOffset, - parameterMeta[k].size, - parameterMeta[k].stride) - end - - return flatParameters -end - -function Module:getParameters() - -- get parameters - local parameters,gradParameters = self:parameters() - local p, g = Module.flatten(parameters), Module.flatten(gradParameters) - assert(p:nElement() == g:nElement(), - 'check that you are sharing parameters and gradParameters') - if parameters then - for i=1,#parameters do - assert(parameters[i]:storageOffset() == gradParameters[i]:storageOffset(), - 'misaligned parameter at ' .. tostring(i)) - end - end - return p, g -end - -function Module:__call__(input, gradOutput) - self:forward(input) - if gradOutput then - self:backward(input, gradOutput) - return self.output, self.gradInput - else - return self.output - end -end - --- Run a callback (called with the module as an argument) in preorder over this --- module and its children. --- -function Module:apply(callback) - callback(self) - - if self.modules then - for _, module in ipairs(self.modules) do - module:apply(callback) - end - end -end - -function Module:findModules(typename, container) - container = container or self - local nodes = {} - local containers = {} - local mod_type = torch.typename(self) - if mod_type == typename then - nodes[#nodes+1] = self - containers[#containers+1] = container - end - -- Recurse on nodes with 'modules' - if (self.modules ~= nil) then - if (torch.type(self.modules) == 'table') then - for i = 1, #self.modules do - local child = self.modules[i] - local cur_nodes, cur_containers = - child:findModules(typename, self) - assert(#cur_nodes == #cur_containers, - 'Internal error: incorrect return length') -- This shouldn't happen - -- add the list items from our child to our list (ie return a - -- flattened table of the return nodes). 
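--[[ A short usage sketch for findModules(), on a hypothetical container that
is not part of this diff: it returns every matching descendant together with
its enclosing container, which makes post-hoc surgery on a trained network
straightforward. ]]
local model = nn.Sequential() -- hypothetical network
   :add(nn.Linear(4, 4))
   :add(nn.ReLU())
   :add(nn.Linear(4, 2))
local linears = model:findModules('nn.Linear')
for i = 1, #linears do
   linears[i].bias:zero() -- e.g. zero the bias of every Linear in the tree
end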
- for j = 1, #cur_nodes do - nodes[#nodes+1] = cur_nodes[j] - containers[#containers+1] = cur_containers[j] - end - end - end - end - return nodes, containers -end - --- returns a list of modules -function Module:listModules() - local function tinsert(to, from) - if torch.type(from) == 'table' then - for i=1,#from do - tinsert(to,from[i]) - end - else - table.insert(to,from) - end - end - -- include self first - local modules = {self} - if self.modules then - for i=1,#self.modules do - local modulas = self.modules[i]:listModules() - if modulas then - tinsert(modules,modulas) - end - end - end - return modules -end - -function Module:clearState() - return nn.utils.clear(self, 'output', 'gradInput') -end - --- similar to apply, recursively goes over network and calls --- a callback function which returns a new module replacing the old one -function nn.Module:replace(callback) - local out = callback(self) - if self.modules then - for i, module in ipairs(self.modules) do - self.modules[i] = module:replace(callback) - end - end - return out -end diff --git a/contrib/lua-torch/nn/ModuleCriterion.lua b/contrib/lua-torch/nn/ModuleCriterion.lua deleted file mode 100644 index bfc79ef559..0000000000 --- a/contrib/lua-torch/nn/ModuleCriterion.lua +++ /dev/null @@ -1,44 +0,0 @@ -local ModuleCriterion, parent = torch.class("nn.ModuleCriterion", "nn.Criterion") - -function ModuleCriterion:__init(criterion, inputModule, targetModule, castTarget) - self.inputModule = inputModule - self.targetModule = targetModule - self.castTarget = (castTarget == nil) and true or castTarget - if self.inputModule then - local params = self.inputModule:parameters() - if params and #params > 0 then - print"Warning: nn.ModuleCriterion doesn't support parameter updates" - end - end - self.criterion = criterion -end - -function ModuleCriterion:updateOutput(input, target) - if self.inputModule then - self.input = self.inputModule:forward(input) - end - if self.targetModule then - self.target = self.targetModule:forward(target) - end - self.output = self.criterion:forward(self.input or input, self.target or target) - return self.output -end - -function ModuleCriterion:updateGradInput(input, target) - self.gradInput = self.criterion:backward(self.input or input, self.target or target) - if self.inputModule then - self.gradInput = self.inputModule:backward(input, self.gradInput) - end - return self.gradInput -end - -function ModuleCriterion:type(type, typecache) - if self.inputModule then - self.inputModule:type(type, typecache) - end - if self.castTarget and self.targetModule then - self.targetModule:type(type, typecache) - end - self.criterion:type(type, typecache) - return parent.type(self, type, typecache) -end diff --git a/contrib/lua-torch/nn/Mul.lua b/contrib/lua-torch/nn/Mul.lua deleted file mode 100644 index efa1db6563..0000000000 --- a/contrib/lua-torch/nn/Mul.lua +++ /dev/null @@ -1,38 +0,0 @@ -local Mul, parent = torch.class('nn.Mul', 'nn.Module') - -function Mul:__init() - parent.__init(self) - - self.weight = torch.Tensor(1) - self.gradWeight = torch.Tensor(1) - - self:reset() -end - - -function Mul:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1./math.sqrt(self.weight:size(1)) - end - - self.weight:uniform(-stdv, stdv); -end - -function Mul:updateOutput(input) - self.output:resizeAs(input):copy(input); - self.output:mul(self.weight[1]); - return self.output -end - -function Mul:updateGradInput(input, gradOutput) - self.gradInput:resizeAs(input):zero() - self.gradInput:add(self.weight[1], 
gradOutput) - return self.gradInput -end - -function Mul:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - self.gradWeight[1] = self.gradWeight[1] + scale*input:dot(gradOutput); -end diff --git a/contrib/lua-torch/nn/MulConstant.lua b/contrib/lua-torch/nn/MulConstant.lua deleted file mode 100644 index e8c473bee0..0000000000 --- a/contrib/lua-torch/nn/MulConstant.lua +++ /dev/null @@ -1,41 +0,0 @@ -local MulConstant, parent = torch.class('nn.MulConstant', 'nn.Module') - -function MulConstant:__init(constant_scalar,ip) - parent.__init(self) - assert(type(constant_scalar) == 'number', 'input is not scalar!') - self.constant_scalar = constant_scalar - - -- default for inplace is false - self.inplace = ip or false - if (ip and type(ip) ~= 'boolean') then - error('in-place flag must be boolean') - end -end - -function MulConstant:updateOutput(input) - if self.inplace then - input:mul(self.constant_scalar) - self.output:set(input) - else - self.output:resizeAs(input) - self.output:copy(input) - self.output:mul(self.constant_scalar) - end - return self.output -end - -function MulConstant:updateGradInput(input, gradOutput) - if self.gradInput then - if self.inplace then - gradOutput:mul(self.constant_scalar) - self.gradInput:set(gradOutput) - -- restore previous input value - input:div(self.constant_scalar) - else - self.gradInput:resizeAs(gradOutput) - self.gradInput:copy(gradOutput) - self.gradInput:mul(self.constant_scalar) - end - return self.gradInput - end -end diff --git a/contrib/lua-torch/nn/MultiCriterion.lua b/contrib/lua-torch/nn/MultiCriterion.lua deleted file mode 100644 index 9593177115..0000000000 --- a/contrib/lua-torch/nn/MultiCriterion.lua +++ /dev/null @@ -1,40 +0,0 @@ -local MultiCriterion, parent = torch.class('nn.MultiCriterion', 'nn.Criterion') - -function MultiCriterion:__init() - parent.__init(self) - self.criterions = {} - self.weights = torch.DoubleStorage() -end - -function MultiCriterion:add(criterion, weight) - assert(criterion, 'no criterion provided') - weight = weight or 1 - table.insert(self.criterions, criterion) - self.weights:resize(#self.criterions, true) - self.weights[#self.criterions] = weight - return self -end - -function MultiCriterion:updateOutput(input, target) - self.output = 0 - for i=1,#self.criterions do - self.output = self.output + self.weights[i]*self.criterions[i]:updateOutput(input, target) - end - return self.output -end - -function MultiCriterion:updateGradInput(input, target) - self.gradInput = nn.utils.recursiveResizeAs(self.gradInput, input) - nn.utils.recursiveFill(self.gradInput, 0) - for i=1,#self.criterions do - nn.utils.recursiveAdd(self.gradInput, self.weights[i], self.criterions[i]:updateGradInput(input, target)) - end - return self.gradInput -end - -function MultiCriterion:type(type) - for i,criterion in ipairs(self.criterions) do - criterion:type(type) - end - return parent.type(self, type) -end diff --git a/contrib/lua-torch/nn/MultiLabelMarginCriterion.lua b/contrib/lua-torch/nn/MultiLabelMarginCriterion.lua deleted file mode 100644 index 908b6133c3..0000000000 --- a/contrib/lua-torch/nn/MultiLabelMarginCriterion.lua +++ /dev/null @@ -1,41 +0,0 @@ -local MultiLabelMarginCriterion, parent = torch.class('nn.MultiLabelMarginCriterion', 'nn.Criterion') - -function MultiLabelMarginCriterion:__init() - parent.__init(self) - self.sizeAverage = true - self.isTarget = torch.Tensor() -end - -function MultiLabelMarginCriterion:updateOutput(input, target) - if torch.typename(input):find('torch%.Cuda.*Tensor') then - target 
= torch.CudaLongTensor and target:cudaLong() or target - else - target = target:long() - end - self.output_tensor = self.output_tensor or input.new(1) - input.THNN.MultiLabelMarginCriterion_updateOutput( - input:cdata(), - target:cdata(), - self.output_tensor:cdata(), - self.isTarget:cdata(), - self.sizeAverage - ) - self.output = self.output_tensor[1] - return self.output -end - -function MultiLabelMarginCriterion:updateGradInput(input, target) - if torch.typename(input):find('torch%.Cuda.*Tensor') then - target = torch.CudaLongTensor and target:cudaLong() or target - else - target = target:long() - end - input.THNN.MultiLabelMarginCriterion_updateGradInput( - input:cdata(), - target:cdata(), - self.gradInput:cdata(), - self.isTarget:cdata(), - self.sizeAverage - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/MultiLabelSoftMarginCriterion.lua b/contrib/lua-torch/nn/MultiLabelSoftMarginCriterion.lua deleted file mode 100644 index 9d471d449f..0000000000 --- a/contrib/lua-torch/nn/MultiLabelSoftMarginCriterion.lua +++ /dev/null @@ -1,86 +0,0 @@ ---[[ --- A MultiLabel multiclass criterion based on sigmoid: --- --- the loss is: --- l(x,y) = - sum_i (y[i] * log(p[i]) + (1 - y[i]) * log (1 - p[i])) --- where p[i] = exp(x[i]) / (1 + exp(x[i])) --- --- and with weights: --- l(x,y) = - sum_i weights[i] (y[i] * log(p[i]) + (1 - y[i]) * log (1 - p[i])) --- --- This uses the stable form of the loss and gradients. ---]] - - -local MultiLabelSoftMarginCriterion, parent = torch.class('nn.MultiLabelSoftMarginCriterion', 'nn.Criterion') - - -function MultiLabelSoftMarginCriterion:__init(weights, sizeAverage) - parent.__init(self) - if sizeAverage ~= nil then - self.sizeAverage = sizeAverage - else - self.sizeAverage = true - end - if weights ~= nil then - assert(weights:dim() == 1, "weights input should be 1-D Tensor") - self.weights = weights - end - self.sigmoid = nn.Sigmoid() -end - -function MultiLabelSoftMarginCriterion:updateOutput(input, target) - local weights = self.weights - if weights ~= nil and target:dim() ~= 1 then - weights = self.weights:view(1, target:size(2)):expandAs(target) - end - - local x = input:view(input:nElement()) - local t = target:view(target:nElement()) - - self.sigmoid:updateOutput(x) - - self._buffer1 = self._buffer1 or input.new() - self._buffer2 = self._buffer2 or input.new() - - self._buffer1:ge(x, 0) -- indicator - - -- log(1 + exp(x - cmul(x, indicator):mul(2))) - self._buffer2:cmul(x, self._buffer1):mul(-2):add(x):exp():add(1):log() - -- cmul(x, t - indicator) - self._buffer1:mul(-1):add(t):cmul(x) - -- log(1 + exp(x - cmul(x, indicator):mul(2))) - cmul(x, t - indicator) - self._buffer2:add(-1, self._buffer1) - - if weights ~= nil then - self._buffer2:cmul(weights) - end - - self.output = self._buffer2:sum() - - if self.sizeAverage then - self.output = self.output / input:nElement() - end - - return self.output -end - -function MultiLabelSoftMarginCriterion:updateGradInput(input, target) - local weights = self.weights - if weights ~= nil and target:dim() ~= 1 then - weights = self.weights:view(1, target:size(2)):expandAs(target) - end - - self.gradInput:resizeAs(input):copy(self.sigmoid.output) - self.gradInput:add(-1, target) - - if weights ~= nil then - self.gradInput:cmul(weights) - end - - if self.sizeAverage then - self.gradInput:div(target:nElement()) - end - - return self.gradInput -end diff --git a/contrib/lua-torch/nn/MultiMarginCriterion.lua b/contrib/lua-torch/nn/MultiMarginCriterion.lua deleted file mode 100644 index e3122386a0..0000000000
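--[[ A plain-Lua check (arbitrary numbers, not part of this diff) of the
stable form used by MultiLabelSoftMarginCriterion above: with
ind = (x >= 0) and 1 or 0, the quantity log(1 + exp(x - 2*x*ind)) - x*(t - ind)
equals the naive -(t*log(p) + (1 - t)*log(1 - p)) with p = 1/(1 + exp(-x)),
but never exponentiates a large positive x. ]]
local function naiveLoss(x, t)
   local p = 1 / (1 + math.exp(-x))
   return -(t * math.log(p) + (1 - t) * math.log(1 - p))
end
local function stableLoss(x, t)
   local ind = (x >= 0) and 1 or 0
   return math.log(1 + math.exp(x - 2 * x * ind)) - x * (t - ind)
end
print(naiveLoss(3, 1), stableLoss(3, 1)) -- both ~0.0486
print(stableLoss(800, 0)) -- 800, where the naive form overflows to inf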
--- a/contrib/lua-torch/nn/MultiMarginCriterion.lua +++ /dev/null @@ -1,64 +0,0 @@ -local THNN = require 'nn.THNN' -local MultiMarginCriterion, parent = torch.class('nn.MultiMarginCriterion', 'nn.Criterion') - -function MultiMarginCriterion:__init(p, weights, margin) - assert(p == nil or p == 1 or p == 2, 'only p=1 and p=2 supported') - self.p = p or 1 - self.margin = margin or 1.0 - parent.__init(self) - self.sizeAverage = true - if weights then - assert(weights:dim() == 1, "weights input should be 1-D Tensor") - self.weights = weights - end -end - -function MultiMarginCriterion:updateOutput(input, target) - -- backward compatibility - if not torch.isTensor(target) then - self.target_tensor = self.target_tensor or torch.LongTensor(1) - self.target_tensor[1] = target - target = self.target_tensor - end - if torch.typename(input):find('torch%.Cuda.*Tensor') then - target = torch.CudaLongTensor and target:cudaLong() or target - else - target = target:long() - end - self.p = self.p or 1 - self.output_tensor = self.output_tensor or input.new(1) - input.THNN.MultiMarginCriterion_updateOutput( - input:cdata(), - target:cdata(), - self.output_tensor:cdata(), - self.sizeAverage, - self.p, - THNN.optionalTensor(self.weights), - self.margin - ) - self.output = self.output_tensor[1] - return self.output -end - -function MultiMarginCriterion:updateGradInput(input, target) - if not torch.isTensor(target) then - self.target_tensor = self.target_tensor or torch.LongTensor(1) - self.target_tensor[1] = target - target = self.target_tensor - end - if torch.typename(input):find('torch%.Cuda.*Tensor') then - target = torch.CudaLongTensor and target:cudaLong() or target - else - target = target:long() - end - input.THNN.MultiMarginCriterion_updateGradInput( - input:cdata(), - target:cdata(), - self.gradInput:cdata(), - self.sizeAverage, - self.p, - THNN.optionalTensor(self.weights), - self.margin - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/NaN.lua b/contrib/lua-torch/nn/NaN.lua deleted file mode 100644 index b80f6a04d0..0000000000 --- a/contrib/lua-torch/nn/NaN.lua +++ /dev/null @@ -1,72 +0,0 @@ ------------------------------------------------------------------------- ---[[ NaN ]]-- --- Asserts that outputs and gradInputs do not contain NaNs. --- Useful for locating the source of NaN errors. 
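--[[ A brief usage sketch for the decorator defined here, wrapping a
hypothetical layer that is not part of this diff: the first forward or
backward pass that produces a NaN raises an error identifying the wrapped
module. ]]
local layer = nn.NaN(nn.Linear(20, 10)) -- hypothetical wrapped layer
local input = torch.randn(20)
local output = layer:forward(input) -- errors here if output contains NaN
layer:backward(input, torch.randn(10)) -- errors here if gradInput contains NaN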
------------------------------------------------------------------------- -local NaN, parent = torch.class("nn.NaN", "nn.Decorator") - -local idseq = 0 -function NaN.newId() - idseq = idseq + 1 - return idseq -end - -function NaN:__init(module, id) - parent.__init(self, module) - self.id = id or NaN.newId() -end - -function NaN:recursiveIsNaN(tensor) - local isNaN = false - if torch.type(tensor) == 'table' then - for k,v in pairs(tensor) do - isNaN = self:recursiveIsNaN(v) - if isNaN then break end - end - else - local _ = require 'moses' - isNaN = _.isNaN(tensor:sum()) - end - return isNaN -end - -function NaN:updateOutput(input) - self.output = self.modules[1]:updateOutput(input) - if self:recursiveIsNaN(self.output) then - if self:recursiveIsNaN(input) then - error(string.format("NaN found in input of module :\n%s", self:__tostring__())) - elseif self:recursiveIsNaN(self:parameters()) then - error(string.format("NaN found in parameters of module :\n%s", self:__tostring__())) - end - error(string.format("NaN found in output of module :\n%s", self:__tostring__())) - end - return self.output -end - -function NaN:updateGradInput(input, gradOutput) - self.gradInput = self.modules[1]:updateGradInput(input, gradOutput) - if self:recursiveIsNaN(self.gradInput) then - if self:recursiveIsNaN(gradOutput) then - error(string.format("NaN found in gradOutput of module :\n%s", self:__tostring__())) - end - error(string.format("NaN found in gradInput of module :\n%s", self:__tostring__())) - end - return self.gradInput -end - -function NaN:accGradParameters(input, gradOutput, scale) - self.modules[1]:accGradParameters(input, gradOutput, scale) - local params, gradParams = self:parameters() - if self:recursiveIsNaN(gradParams) then - error(string.format("NaN found in gradParameters of module :\n%s", self:__tostring__())) - end -end - -function NaN:__tostring__() - local selfstring = torch.type(self) .. '(' .. self.id .. ')' - if self.modules[1].__tostring__ then - return selfstring .. ' @ ' .. self.modules[1]:__tostring__() - else - return selfstring .. ' @ ' .. 
torch.type(self.modules[1]) - end -end diff --git a/contrib/lua-torch/nn/Narrow.lua b/contrib/lua-torch/nn/Narrow.lua deleted file mode 100644 index a6ebaa3213..0000000000 --- a/contrib/lua-torch/nn/Narrow.lua +++ /dev/null @@ -1,45 +0,0 @@ -local Narrow, parent = torch.class('nn.Narrow', 'nn.Module') - -function Narrow:__init(dimension,offset,length) - parent.__init(self) - self.dimension=dimension - self.index=offset - self.length=length or 1 - if not dimension or not offset then - error('nn.Narrow(dimension, offset, length)') - end -end - -function Narrow:updateOutput(input) - local dim = self.dimension < 0 and input:dim() + self.dimension + 1 or self.dimension - local length = self.length - if length < 0 then - length = input:size(dim) - self.index + self.length + 2 - end - local index = self.index - if self.index < 0 then - index = 1 - length = input:size(dim) - length - end - local output=input:narrow(dim, index, length) - self.output = self.output:typeAs(output) - self.output:resizeAs(output):copy(output) - return self.output -end - -function Narrow:updateGradInput(input, gradOutput) - local dim = self.dimension < 0 and input:dim() + self.dimension + 1 or self.dimension - local length = self.length - if length < 0 then - length = input:size(dim) - self.index + self.length + 2 - end - local index = self.index - if self.index < 0 then - index = 1 - length = input:size(dim) - length - end - self.gradInput = self.gradInput:typeAs(input) - self.gradInput:resizeAs(input):zero() - self.gradInput:narrow(dim,index,length):copy(gradOutput) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/NarrowTable.lua b/contrib/lua-torch/nn/NarrowTable.lua deleted file mode 100644 index 17429f3b1a..0000000000 --- a/contrib/lua-torch/nn/NarrowTable.lua +++ /dev/null @@ -1,43 +0,0 @@ -local NarrowTable, parent = torch.class('nn.NarrowTable', 'nn.Module') - -function NarrowTable:__init(offset, length) - parent.__init(self) - self.offset = offset - self.length = length or 1 - if not offset then - error('nn.NarrowTable(offset, length)') - end - - self.output = {} - self.gradInput = {} -end - -function NarrowTable:updateOutput(input) - for k,v in ipairs(self.output) do self.output[k] = nil end - for i=1,self.length do - self.output[i] = input[self.offset+i-1] - end - return self.output -end - -function NarrowTable:updateGradInput(input, gradOutput) - for i=1,#gradOutput do - self.gradInput[self.offset+i-1] = gradOutput[i] - end - for i=1,#input do - if (i < self.offset) or (i >= self.offset + self.length) then - self.gradInput[i] = nn.utils.recursiveResizeAs(self.gradInput[i], input[i]) - nn.utils.recursiveFill(self.gradInput[i], 0) - end - end - for i=#input+1,#self.gradInput do self.gradInput[i] = nil end - return self.gradInput -end - -function NarrowTable:type(type, tensorCache) - self.output = {} - self.gradInput = {} - return parent.type(self, type, tensorCache) -end - -NarrowTable.clearState = nn.Identity.clearState diff --git a/contrib/lua-torch/nn/Normalize.lua b/contrib/lua-torch/nn/Normalize.lua deleted file mode 100644 index 0937ebba94..0000000000 --- a/contrib/lua-torch/nn/Normalize.lua +++ /dev/null @@ -1,150 +0,0 @@ -local Normalize, parent = torch.class('nn.Normalize', 'nn.Module') - -function Normalize:__init(p,eps) - parent.__init(self) - assert(p,'p-norm not provided') - assert(p > 0, p..'-norm not supported') - self.p = p - self.eps = eps or 1e-10 -end - -function Normalize:updateOutput(input) - assert(input:dim() <= 2, 'only 1d layer supported') - local input_size = input:size() 
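--[[ A usage sketch for this layer on hypothetical data: nn.Normalize(p)
divides each input row by its L_p norm (up to the small eps regularizer), so
every output row has unit norm. ]]
local l2 = nn.Normalize(2) -- hypothetical instance
local y = l2:forward(torch.Tensor{{3, 4}, {0, 5}})
-- y is (up to eps) {{0.6, 0.8}, {0.0, 1.0}}: each row now has unit L2 norm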
- if input:dim() == 1 then - input = input:view(1,-1) - end - - self._output = self._output or input.new() - self.norm = self.norm or input.new() - self.buffer = self.buffer or input.new() - - self._output:resizeAs(input) - - if self.p == math.huge then - -- specialization for the infinity norm - if not self._indices then - if torch.typename(self.output):find('torch%.Cuda.*Tensor') then - self._indices = torch.CudaLongTensor and torch.CudaLongTensor() or torch.CudaTensor() - else - self._indices = torch.LongTensor() - end - end - - self.buffer:abs(input) - torch.max(self.norm, self._indices, self.buffer, 2) - self.norm:add(self.eps) - else - self.normp = self.normp or input.new() - if self.p % 2 ~= 0 then - self.buffer:abs(input):pow(self.p) - else - self.buffer:pow(input,self.p) - end - self.normp:sum(self.buffer,2):add(self.eps) - self.norm:pow(self.normp,1/self.p) - end - self._output:cdiv(input, self.norm:view(-1,1):expandAs(input)) - - self.output:view(self._output, input_size) - return self.output -end - -function Normalize:updateGradInput(input, gradOutput) - assert(input:dim() <= 2, 'only 1d layer supported') - assert(gradOutput:dim() <= 2, 'only 1d layer supported') - - local input_size = input:size() - if input:dim() == 1 then - input = input:view(1,-1) - end - - local n = input:size(1) -- batch size - local d = input:size(2) -- dimensionality of vectors - - self._gradInput = self._gradInput or input.new() - self.cross = self.cross or input.new() - -- compute diagonal term with gradOutput - self._gradInput:resize(n,d) - if self.p == math.huge then - -- specialization for the inf case - self._gradInput:cmul(self.norm:view(n,1,1):expand(n,d,1),gradOutput) - self.buffer:resizeAs(input):zero() - self.cross:resize(n,1) - self.cross:gather(input,2,self._indices) - self.cross:cdiv(self.norm) - self.buffer:scatter(2,self._indices,self.cross) - else - self._gradInput:cmul(self.normp:view(n,1):expand(n,d), gradOutput) - -- small optimizations for different p - -- buffer = input*|input|^(p-2) - if self.p % 2 ~= 0 then - -- for non-even p, need to add absolute value - if self.p < 2 then - -- add eps to avoid possible division by 0 - self.buffer:abs(input):add(self.eps):pow(self.p-2):cmul(input) - else - self.buffer:abs(input):pow(self.p-2):cmul(input) - end - elseif self.p == 2 then - -- special case for p == 2, pow(x,0) = 1 - self.buffer:copy(input) - else - -- p is even and > 2, pow(x,p) is always positive - self.buffer:pow(input,self.p-2):cmul(input) - end - end - -- compute cross term in two steps - self.cross:resize(n,1) - - -- instead of having a huge temporary matrix (b1*b2), - -- do the computations as b1*(b2*gradOutput). 
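--[[ The reassociation mentioned in the comment above, spelled out on toy
vectors (hypothetical values): per row, (b1 outer b2) * g materializes a
d x d matrix, while b1 * (b2 . g) yields the identical vector with only O(d)
memory, which is why the code sums input*gradOutput into `cross` first. ]]
local b1 = torch.Tensor{1, 2, 3} -- stands in for buffer = x * |x|^(p-2)
local b2 = torch.Tensor{4, 5, 6} -- stands in for the input row
local g = torch.Tensor{0.1, 0.2, 0.3} -- stands in for gradOutput
local viaMatrix = torch.ger(b1, b2) * g -- builds the full d x d outer product
local viaScalar = b1 * b2:dot(g) -- same result {3.2, 6.4, 9.6}, no d x d buffer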
This avoids redundant - computation and also a huge buffer of size n*d^2 - self.buffer2 = self.buffer2 or input.new() -- nxd - self.buffer2:cmul(input, gradOutput) - self.cross:sum(self.buffer2, 2) - - self.buffer:cmul(self.cross:expandAs(self.buffer)) - self._gradInput:add(-1, self.buffer) - - -- reuse cross buffer for normalization - if self.p == math.huge then - self.cross:cmul(self.norm,self.norm) - else - self.cross:cmul(self.normp,self.norm) - end - self._gradInput:cdiv(self.cross:expand(n,d)) - - self.gradInput:view(self._gradInput, input_size) - return self.gradInput -end - -function Normalize:__tostring__() - local s - -- different prints if the norm is integer - if self.p % 1 == 0 then - s = '%s(%d)' - else - s = '%s(%f)' - end - return string.format(s,torch.type(self),self.p) -end - -function Normalize:type(type, tensorCache) - self._indices = nil - parent.type(self, type, tensorCache) - return self -end - -function Normalize:clearState() - nn.utils.clear(self, { - '_output', - '_indices', - '_gradInput', - 'buffer', - 'norm', - 'normp', - 'cross', - }) - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/OneHot.lua b/contrib/lua-torch/nn/OneHot.lua deleted file mode 100644 index d1dc1b52db..0000000000 --- a/contrib/lua-torch/nn/OneHot.lua +++ /dev/null @@ -1,69 +0,0 @@ -local OneHot, parent = torch.class('nn.OneHot', 'nn.Module') - --- adapted from https://github.com/karpathy/char-rnn --- and https://github.com/hughperkins/char-lstm - -function OneHot:__init(outputSize) - parent.__init(self) - self.outputSize = outputSize -end - -function OneHot:updateOutput(input) - local size - if type(input) == 'number' then - if self:type() == 'torch.CudaTensor' then - self._single = self._single or torch.CudaTensor():resize(1); - else - self._single = self._single or torch.LongTensor():resize(1); - end - self._single[1] = input - input = self._single; - size = {} - else - size = input:size():totable() - end - table.insert(size, self.outputSize) - - self.output:resize(table.unpack(size)):zero() - - size[#size] = 1 - local input_ = input:view(table.unpack(size)) - - if torch.type(input) == 'torch.CudaTensor' or torch.type(input) == 'torch.ClTensor' then - self.output:scatter(self.output:dim(), input_, 1) - else - if torch.type(self.output) == 'torch.CudaTensor' then - -- input is not cuda but the module is, cast input to cuda - self._input = self._input or torch.CudaTensor() - self._input:resize(input_:size()):copy(input_) - input_ = self._input - elseif torch.type(input) ~= 'torch.LongTensor' then - -- input is not long, module is not cuda, cast input to long - self._input = self._input or torch.LongTensor() - self._input:resize(input_:size()):copy(input_) - input_ = self._input - end - self.output:scatter(self.output:dim(), input_, 1) - end - - return self.output -end - -function OneHot:updateGradInput(input, gradOutput) - if type(input) == 'number' then - return 0 - else - self.gradInput:resize(input:size()):zero() - return self.gradInput - end -end - -function OneHot:clearState() - self._single = nil - self._input = nil -end - -function OneHot:type(type, typecache) - self:clearState() - return parent.type(self, type, typecache) -end diff --git a/contrib/lua-torch/nn/PReLU.lua b/contrib/lua-torch/nn/PReLU.lua deleted file mode 100644 index 2e58fba4e9..0000000000 --- a/contrib/lua-torch/nn/PReLU.lua +++ /dev/null @@ -1,52 +0,0 @@ -local PReLU, parent = torch.class('nn.PReLU','nn.Module') - -function PReLU:__init(nOutputPlane) - parent.__init(self) - -- if no argument 
provided, use shared model (weight is scalar) - self.nOutputPlane = nOutputPlane or 0 - self.weight = torch.Tensor(nOutputPlane or 1):fill(0.25) - self.gradWeight = torch.Tensor(nOutputPlane or 1) -end - -function PReLU:updateOutput(input) - input.THNN.PReLU_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - self.nOutputPlane - ) - return self.output -end - -function PReLU:updateGradInput(input, gradOutput) - input.THNN.PReLU_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.nOutputPlane - ) - return self.gradInput -end - -function PReLU:accGradParameters(input, gradOutput, scale) - self.gradWeightBuf = self.gradWeightBuf or input.new() - self.gradWeightBuf2 = self.gradWeightBuf2 or input.new() - input.THNN.PReLU_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.gradWeight:cdata(), - self.gradWeightBuf:cdata(), - self.gradWeightBuf2:cdata(), - self.nOutputPlane, - scale or 1 - ) - return self.gradWeight -end - -function PReLU:clearState() - nn.utils.clear(self, 'gradWeightBuf', 'gradWeightBuf2') - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/Padding.lua b/contrib/lua-torch/nn/Padding.lua deleted file mode 100644 index d5f7771d07..0000000000 --- a/contrib/lua-torch/nn/Padding.lua +++ /dev/null @@ -1,65 +0,0 @@ -local Padding, parent = torch.class('nn.Padding', 'nn.Module') - --- pad puts in [pad] amount of [value] over dimension [dim], starting at index [index] in that dimension. If pad<0, index counts from the left. If pad>0 index counts from the right --- index = 1 pads before index 1. index = 2 pads starting before index 2 and after index 1 in dimension [dim] -function Padding:__init(dim, pad, nInputDim, value, index) - self.value = value or 0 - self.index = index or 1 - self.dim = dim - self.pad = pad - self.nInputDim = nInputDim - self.outputSize = torch.LongStorage() - parent.__init(self) -end - -function Padding:updateOutput(input) - self.outputSize:resize(input:dim()) - self.outputSize:copy(input:size()) - local dim = self.dim - if self.nInputDim and input:dim() ~= self.nInputDim then - dim = dim + 1 - end - self.outputSize[dim] = self.outputSize[dim] + math.abs(self.pad) - self.output:resize(self.outputSize) - self.output:fill(self.value) - local index = self.index - local pad = self.pad - if pad > 0 then - index = input:size(dim) - index + 2 - else - pad = -pad - end - if index == 1 then - self.output:narrow(dim, 1 + pad, input:size(dim)):copy(input) - elseif index == input:size(dim) + 1 then - self.output:narrow(dim, 1, input:size(dim)):copy(input) - else - self.output:narrow(dim, 1, index - 1):copy(input:narrow(dim, 1, index - 1)) - self.output:narrow(dim, index + pad, input:size(dim) - (index - 1)):copy(input:narrow(dim, index, input:size(dim) - (index - 1))) - end - return self.output -end - -function Padding:updateGradInput(input, gradOutput) - self.gradInput:resizeAs(input) - local dim = self.dim - if self.nInputDim and input:dim() ~= self.nInputDim then - dim = dim + 1 - end - local index = self.index - local pad = self.pad - if pad > 0 then - index = input:size(dim) - index + 2 - else - pad = -pad - end - if index == 1 then - self.gradInput:copy(gradOutput:narrow(dim, 1 + pad, input:size(dim))) - elseif index == input:size(dim) + 1 then - self.gradInput:copy(gradOutput:narrow(dim, 1, input:size(dim))) - else - self.gradInput:narrow(dim, 1, index - 1):copy(gradOutput:narrow(dim, 1, index - 1)) - 
self.gradInput:narrow(dim, index, input:size(dim) - (index - 1)):copy(gradOutput:narrow(dim, index + pad, input:size(dim) - (index - 1))) - end - return self.gradInput -end diff --git a/contrib/lua-torch/nn/PairwiseDistance.lua b/contrib/lua-torch/nn/PairwiseDistance.lua deleted file mode 100644 index 99a502c160..0000000000 --- a/contrib/lua-torch/nn/PairwiseDistance.lua +++ /dev/null @@ -1,91 +0,0 @@ -local PairwiseDistance, parent = torch.class('nn.PairwiseDistance', 'nn.Module') - -function PairwiseDistance:__init(p) - parent.__init(self) - - -- state - self.gradInput = {} - self.diff = torch.Tensor() - self.norm = p or 2 -- Default using Euclidean distance -end - -function PairwiseDistance:updateOutput(input) - self.output:resize(1) - if input[1]:dim() == 1 then - self.output:resize(1) - self.output[1]=input[1]:dist(input[2],self.norm) - elseif input[1]:dim() == 2 then - self.diff = self.diff or input[1].new() - self.diff:resizeAs(input[1]) - - local diff = self.diff:zero() - diff:add(input[1], -1, input[2]) - diff:abs() - - self.output:resize(input[1]:size(1)) - self.output:zero() - self.output:add(diff:pow(self.norm):sum(2)) - self.output:pow(1./self.norm) - else - error('input must be vector or matrix') - end - - return self.output -end - -local function mathsign(x) - if x==0 then return 2*torch.random(2)-3; end - if x>0 then return 1; else return -1; end -end - -function PairwiseDistance:updateGradInput(input, gradOutput) - if input[1]:dim() > 2 then - error('input must be vector or matrix') - end - - self.gradInput[1] = (self.gradInput[1] or input[1].new()):resize(input[1]:size()) - self.gradInput[2] = (self.gradInput[2] or input[2].new()):resize(input[2]:size()) - self.gradInput[1]:copy(input[1]) - self.gradInput[1]:add(-1, input[2]) - - if self.norm==1 then - self.gradInput[1]:apply(mathsign) - else - -- Note: derivative of p-norm: - -- d/dx_k(||x||_p) = (x_k * abs(x_k)^(p-2)) / (||x||_p)^(p-1) - if (self.norm > 2) then - self.gradInput[1]:cmul(self.gradInput[1]:clone():abs():pow(self.norm-2)) - end - - if (input[1]:dim() > 1) then - self.outExpand = self.outExpand or self.output.new() - self.outExpand:resize(self.output:size(1), 1) - self.outExpand:copy(self.output) - self.outExpand:add(1.0e-6) -- Prevent divide by zero errors - self.outExpand:pow(-(self.norm-1)) - self.gradInput[1]:cmul(self.outExpand:expand(self.gradInput[1]:size(1), - self.gradInput[1]:size(2))) - else - self.gradInput[1]:mul(math.pow(self.output[1] + 1e-6, -(self.norm-1))) - end - end - if input[1]:dim() == 1 then - self.gradInput[1]:mul(gradOutput[1]) - else - self.grad = self.grad or gradOutput.new() - self.ones = self.ones or gradOutput.new() - - self.grad:resizeAs(input[1]):zero() - self.ones:resize(input[1]:size(2)):fill(1) - - self.grad:addr(gradOutput, self.ones) - self.gradInput[1]:cmul(self.grad) - end - self.gradInput[2]:zero():add(-1, self.gradInput[1]) - return self.gradInput -end - -function PairwiseDistance:clearState() - nn.utils.clear(self, 'diff', 'outExpand', 'grad', 'ones') - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/Parallel.lua b/contrib/lua-torch/nn/Parallel.lua deleted file mode 100644 index 58cb9748ea..0000000000 --- a/contrib/lua-torch/nn/Parallel.lua +++ /dev/null @@ -1,116 +0,0 @@ -local Parallel, parent = torch.class('nn.Parallel', 'nn.Container') - -function Parallel:__init(inputDimension,outputDimension) - parent.__init(self) - self.modules = {} - self.inputDimension = inputDimension - self.outputDimension = outputDimension -end - -function 
Parallel:updateOutput(input) - local nModule=input:size(self.inputDimension) - local outputs = {} - self.totalOutputSize = self.totalOutputSize or torch.LongStorage() - local totalOutputSize = self.totalOutputSize - - for i=1,nModule do - local currentInput = input:select(self.inputDimension,i) - local currentOutput = self:rethrowErrors(self.modules[i], i, 'updateOutput', currentInput) - table.insert(outputs, currentOutput) - local outputSize = currentOutput:size(self.outputDimension) - - if i == 1 then - totalOutputSize:resize(currentOutput:dim()):copy(currentOutput:size()) - else - totalOutputSize[self.outputDimension] = totalOutputSize[self.outputDimension] + outputSize - end - - end - self.output:resize(totalOutputSize) - - local offset = 1 - for i=1,nModule do - local currentOutput = outputs[i] - local outputSize = currentOutput:size(self.outputDimension) - self.output:narrow(self.outputDimension, offset, outputSize):copy(currentOutput) - offset = offset + currentOutput:size(self.outputDimension) - end - return self.output -end - -function Parallel:updateGradInput(input, gradOutput) - local nModule=input:size(self.inputDimension) - self.gradInput:resizeAs(input) - - local offset = 1 - for i=1,nModule do - local module=self.modules[i] - local currentInput = input:select(self.inputDimension,i) - local currentOutput = module.output - local outputSize = currentOutput:size(self.outputDimension) - local currentGradOutput = gradOutput:narrow(self.outputDimension, offset, outputSize) - - local currentGradInput = self:rethrowErrors(module, i, 'updateGradInput', currentInput, currentGradOutput) - - self.gradInput:select(self.inputDimension,i):copy(currentGradInput) - offset = offset + outputSize - end - return self.gradInput -end - -function Parallel:accGradParameters(input, gradOutput, scale) - local nModule=input:size(self.inputDimension) - - local offset = 1 - for i=1,nModule do - local module = self.modules[i] - local currentOutput = module.output - local outputSize = currentOutput:size(self.outputDimension) - - self:rethrowErrors(module, i, 'accGradParameters', - input:select(self.inputDimension,i), - gradOutput:narrow(self.outputDimension, offset,outputSize), - scale) - - offset = offset + outputSize - end -end - -function Parallel:accUpdateGradParameters(input, gradOutput, lr) - local nModule=input:size(self.inputDimension) - - local offset = 1 - for i=1,nModule do - local module = self.modules[i]; - local currentOutput = module.output - self:rethrowErrors(module, i, 'accUpdateGradParameters', - input:select(self.inputDimension,i), - gradOutput:narrow(self.outputDimension, offset, - currentOutput:size(self.outputDimension)), - lr) - - offset = offset + currentOutput:size(self.outputDimension) - end -end - -function Parallel:__tostring__() - local tab = ' ' - local line = '\n' - local next = ' |`-> ' - local lastNext = ' `-> ' - local ext = ' | ' - local extlast = ' ' - local last = ' ... -> ' - local str = torch.type(self) - str = str .. ' {' .. line .. tab .. 'input' - for i=1,#self.modules do - if i == #self.modules then - str = str .. line .. tab .. lastNext .. '(' .. i .. '): ' .. tostring(self.modules[i]):gsub(line, line .. tab .. extlast) - else - str = str .. line .. tab .. next .. '(' .. i .. '): ' .. tostring(self.modules[i]):gsub(line, line .. tab .. ext) - end - end - str = str .. line .. tab .. last .. 'output' - str = str .. line .. 
'}' - return str -end diff --git a/contrib/lua-torch/nn/ParallelCriterion.lua b/contrib/lua-torch/nn/ParallelCriterion.lua deleted file mode 100644 index 45607d5c36..0000000000 --- a/contrib/lua-torch/nn/ParallelCriterion.lua +++ /dev/null @@ -1,41 +0,0 @@ -local ParallelCriterion, parent = torch.class('nn.ParallelCriterion', 'nn.Criterion') - -function ParallelCriterion:__init(repeatTarget) - parent.__init(self) - self.criterions = {} - self.weights = {} - self.gradInput = {} - self.repeatTarget = repeatTarget -end - -function ParallelCriterion:add(criterion, weight) - assert(criterion, 'no criterion provided') - weight = weight or 1 - table.insert(self.criterions, criterion) - table.insert(self.weights, weight) - return self -end - -function ParallelCriterion:updateOutput(input, target) - self.output = 0 - for i,criterion in ipairs(self.criterions) do - local target = self.repeatTarget and target or target[i] - self.output = self.output + self.weights[i]*criterion:updateOutput(input[i],target) - end - return self.output -end - -function ParallelCriterion:updateGradInput(input, target) - self.gradInput = nn.utils.recursiveResizeAs(self.gradInput, input) - nn.utils.recursiveFill(self.gradInput, 0) - for i,criterion in ipairs(self.criterions) do - local target = self.repeatTarget and target or target[i] - nn.utils.recursiveAdd(self.gradInput[i], self.weights[i], criterion:updateGradInput(input[i], target)) - end - return self.gradInput -end - -function ParallelCriterion:type(type, tensorCache) - self.gradInput = {} - return parent.type(self, type, tensorCache) -end diff --git a/contrib/lua-torch/nn/ParallelTable.lua b/contrib/lua-torch/nn/ParallelTable.lua deleted file mode 100644 index 2fe0899dd5..0000000000 --- a/contrib/lua-torch/nn/ParallelTable.lua +++ /dev/null @@ -1,58 +0,0 @@ -local ParallelTable, parent = torch.class('nn.ParallelTable', 'nn.Container') - -function ParallelTable:__init() - parent.__init(self) - self.modules = {} - self.output = {} - self.gradInput = {} -end - -function ParallelTable:updateOutput(input) - for i=1,#self.modules do - self.output[i] = self:rethrowErrors(self.modules[i], i, 'updateOutput', input[i]) - end - return self.output -end - -function ParallelTable:updateGradInput(input, gradOutput) - for i,module in ipairs(self.modules) do - self.gradInput[i] = self:rethrowErrors(module, i, 'updateGradInput', input[i], gradOutput[i]) - end - return self.gradInput -end - -function ParallelTable:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - for i,module in ipairs(self.modules) do - self:rethrowErrors(module, i, 'accGradParameters', input[i], gradOutput[i], scale) - end -end - -function ParallelTable:accUpdateGradParameters(input, gradOutput, lr) - lr = lr or 1 - for i,module in ipairs(self.modules) do - self:rethrowErrors(module, i, 'accUpdateGradParameters', input[i], gradOutput[i], lr) - end -end - -function ParallelTable:__tostring__() - local tab = ' ' - local line = '\n' - local next = ' |`-> ' - local lastNext = ' `-> ' - local ext = ' | ' - local extlast = ' ' - local last = ' ... -> ' - local str = torch.type(self) - str = str .. ' {' .. line .. tab .. 'input' - for i=1,#self.modules do - if i == #self.modules then - str = str .. line .. tab .. lastNext .. '(' .. i .. '): ' .. tostring(self.modules[i]):gsub(line, line .. tab .. extlast) - else - str = str .. line .. tab .. next .. '(' .. i .. '): ' .. tostring(self.modules[i]):gsub(line, line .. tab .. ext) - end - end - str = str .. line .. tab .. last .. 'output' - str = str .. 
line .. '}'
-   return str
-end
diff --git a/contrib/lua-torch/nn/PartialLinear.lua b/contrib/lua-torch/nn/PartialLinear.lua
deleted file mode 100644
index 6e92cfc08e..0000000000
--- a/contrib/lua-torch/nn/PartialLinear.lua
+++ /dev/null
@@ -1,114 +0,0 @@
-local PartialLinear, Module = torch.class('nn.PartialLinear', 'nn.Module')
-
---[[
-
-PartialLinear is a Linear layer that allows the user to set a collection of
-column indices. When the column indices are set, the layer will behave like a
-Linear layer that only has those columns. Meanwhile, all parameters are
-preserved, so resetting the PartialLinear layer will result in a module that
-behaves just like a regular Linear layer.
-
-This module is useful, for instance, when you want to do forward-backward on
-only a subset of a Linear layer during training but use the full Linear layer
-at test time.
-
-]]--
-
-function PartialLinear:__init(inputsize, outputsize, bias)
-   local bias = ((bias == nil) and true) or bias
-   Module.__init(self)
-
-   -- define the layer as a small network:
-   local pt = nn.ParallelTable()
-   pt:add(nn.Identity()):add(nn.LookupTable(outputsize, inputsize))
-   self.network = nn.Sequential():add(pt):add(nn.MM(false, true))
-   if bias then
-      self.bias = torch.Tensor(1, outputsize):zero()
-      self.gradBias = torch.Tensor(1, outputsize):zero()
-   end
-
-   -- set partition:
-   self.inputsize = inputsize
-   self.outputsize = outputsize
-   self.allcolumns = torch.range(1, self.outputsize)
-   self:resetPartition()
-end
-
-function PartialLinear:setPartition(indices)
-   self.partition = indices:type(self.allcolumns:type())
-end
-
-function PartialLinear:resetPartition()
-   self.partition = self.allcolumns
-end
-
-function PartialLinear:parameters()
-   return {self.network:get(1):get(2).weight, self.bias},
-          {self.network:get(1):get(2).gradWeight, self.gradBias}
-end -- should return only the relevant partition?
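A minimal usage sketch of this now-removed layer, assuming the classic torch `nn` API; the sizes and tensors below are illustrative only:

   local m = nn.PartialLinear(5, 10)          -- 5 inputs, 10 output columns
   m:setPartition(torch.Tensor{2, 4})         -- forward/backward only columns 2 and 4
   local out = m:forward(torch.randn(3, 5))   -- batch of 3 -> a 3x2 output
   m:resetPartition()                         -- behaves like a full nn.Linear(5, 10) again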
-
-function PartialLinear:updateOutput(input)
-   self.output:set(self.network:forward{input, self.partition})
-   if self.bias then
-      self.output:add(
-         self.bias:index(2, self.partition:long()):expandAs(self.output)
-      )
-      self.addBuffer = self.addBuffer or input.new()
-      if self.addBuffer:nElement() ~= input:size(1) then
-         self.addBuffer:resize(input:size(1)):fill(1)
-      end
-   end
-   return self.output
-end
-
-function PartialLinear:updateGradInput(input, gradOutput)
-   if self.gradInput then
-      self.network:updateGradInput({input, self.partition}, gradOutput)
-      self.gradInput:set(self.network.gradInput[1])
-   end
-   return self.gradInput
-end
-
-function PartialLinear:accGradParameters(input, gradOutput, scale)
-   local scale = scale or 1
-   self.network:accGradParameters({input, self.partition}, gradOutput, scale)
-   if self.bias then
-      self.buffer = self.buffer or input.new()
-      self.buffer:resize(gradOutput:size(2))
-      self.buffer:mv(gradOutput:t(), self.addBuffer):mul(scale)
-      self.gradBias:indexAdd(
-         2, self.partition:long(), self.buffer:view(1, self.buffer:nElement())
-      )
-   end
-end
-
-function PartialLinear:accUpdateGradParameters(input, gradOutput, lr)
-   local gradWeight = self.network:get(1):get(2).gradWeight
-   local gradBias = self.gradBias
-   self.network:get(1):get(2).gradWeight = self.network:get(1):get(2).weight
-   self.gradBias = self.bias
-   self:accGradParameters(input, gradOutput, -lr)
-   self.network:get(1):get(2).gradWeight = gradWeight
-   self.gradBias = gradBias
-end
-
-function PartialLinear:zeroGradParameters()
-   self.network:zeroGradParameters()
-   self.gradBias:zero()
-end
-
-function PartialLinear:updateParameters(learningRate)
-   self.network:updateParameters(learningRate)
-   self.bias:add(-learningRate, self.gradBias)
-end
-
-function PartialLinear:sharedAccUpdateGradParameters(input, gradOutput, lr)
-   -- we do not need to accumulate parameters when sharing:
-   self:defaultAccUpdateGradParameters(input, gradOutput, lr)
-end
-
-function PartialLinear:__tostring__()
-   return torch.type(self) ..
-      string.format('(%d -> %d)', self.inputsize, self.outputsize) ..
-      (self.bias == nil and ' without bias' or '')
-end
diff --git a/contrib/lua-torch/nn/PixelShuffle.lua b/contrib/lua-torch/nn/PixelShuffle.lua
deleted file mode 100644
index dd58ed9480..0000000000
--- a/contrib/lua-torch/nn/PixelShuffle.lua
+++ /dev/null
@@ -1,111 +0,0 @@
-local PixelShuffle, parent = torch.class("nn.PixelShuffle", "nn.Module")
-
--- Shuffles pixels after upscaling with an ESPCNN model
--- Converts a [batch x channel*r^2 x m x p] tensor to a [batch x channel x r*m x r*p]
--- tensor, where r is the upscaling factor.
--- @param upscaleFactor - the upscaling factor to use
-function PixelShuffle:__init(upscaleFactor)
-   parent.__init(self)
-   self.upscaleFactor = upscaleFactor
-   self.upscaleFactorSquared = self.upscaleFactor * self.upscaleFactor
-end
-
--- Computes the forward pass of the layer, i.e. converts a
--- [batch x channel*r^2 x m x p] tensor to a [batch x channel x r*m x r*p] tensor.
--- @param input - the input tensor to be shuffled of size [b x c*r^2 x m x p]
--- @return output - the shuffled tensor of size [b x c x r*m x r*p]
-function PixelShuffle:updateOutput(input)
-   self._intermediateShape = self._intermediateShape or torch.LongStorage(6)
-   self._outShape = self._outShape or torch.LongStorage()
-   self._shuffleOut = self._shuffleOut or input.new()
-
-   local batched = false
-   local batchSize = 1
-   local inputStartIdx = 1
-   local outShapeIdx = 1
-   if input:nDimension() == 4 then
-      batched = true
-      batchSize = input:size(1)
-      inputStartIdx = 2
-      outShapeIdx = 2
-      self._outShape:resize(4)
-      self._outShape[1] = batchSize
-   else
-      self._outShape:resize(3)
-   end
-
-   -- the input holds c*r^2 channels at resolution h/r x w/r; the output holds c channels at h x w
-   local channels = input:size(inputStartIdx) / self.upscaleFactorSquared
-   local inHeight = input:size(inputStartIdx + 1)
-   local inWidth = input:size(inputStartIdx + 2)
-
-   self._intermediateShape[1] = batchSize
-   self._intermediateShape[2] = channels
-   self._intermediateShape[3] = self.upscaleFactor
-   self._intermediateShape[4] = self.upscaleFactor
-   self._intermediateShape[5] = inHeight
-   self._intermediateShape[6] = inWidth
-
-   self._outShape[outShapeIdx] = channels
-   self._outShape[outShapeIdx + 1] = inHeight * self.upscaleFactor
-   self._outShape[outShapeIdx + 2] = inWidth * self.upscaleFactor
-
-   local inputView = torch.view(input, self._intermediateShape)
-
-   self._shuffleOut:resize(inputView:size(1), inputView:size(2), inputView:size(5),
-                           inputView:size(3), inputView:size(6), inputView:size(4))
-   self._shuffleOut:copy(inputView:permute(1, 2, 5, 3, 6, 4))
-
-   self.output = torch.view(self._shuffleOut, self._outShape)
-
-   return self.output
-end
-
--- Computes the backward pass of the layer: given the gradient w.r.t. the output,
--- this function computes the gradient w.r.t. the input.
--- @param input - the input tensor of shape [b x c*r^2 x m x p]
--- @param gradOutput - the tensor with the gradients w.r.t. output of shape [b x c x r*m x r*p]
--- @return gradInput - a tensor of the same shape as input, representing the gradient w.r.t. input.
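-- An illustrative shape check, added here for clarity (not part of the original file):
--   local ps = nn.PixelShuffle(2)                      -- upscale factor r = 2
--   local y = ps:forward(torch.randn(1, 3*2*2, 8, 8))  -- input  [1 x 12 x 8 x 8]
--   print(y:size())                                    -- output [1 x 3 x 16 x 16]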
-function PixelShuffle:updateGradInput(input, gradOutput) - self._intermediateShape = self._intermediateShape or torch.LongStorage(6) - self._shuffleIn = self._shuffleIn or input.new() - - local batchSize = 1 - local inputStartIdx = 1 - if input:nDimension() == 4 then - batchSize = input:size(1) - inputStartIdx = 2 - end - - local channels = input:size(inputStartIdx) / self.upscaleFactorSquared - local height = input:size(inputStartIdx + 1) - local width = input:size(inputStartIdx + 2) - - self._intermediateShape[1] = batchSize - self._intermediateShape[2] = channels - self._intermediateShape[3] = height - self._intermediateShape[4] = self.upscaleFactor - self._intermediateShape[5] = width - self._intermediateShape[6] = self.upscaleFactor - - local gradOutputView = torch.view(gradOutput, self._intermediateShape) - - self._shuffleIn:resize(gradOutputView:size(1), gradOutputView:size(2), gradOutputView:size(4), - gradOutputView:size(6), gradOutputView:size(3), gradOutputView:size(5)) - self._shuffleIn:copy(gradOutputView:permute(1, 2, 4, 6, 3, 5)) - - self.gradInput = torch.view(self._shuffleIn, input:size()) - - return self.gradInput -end - - -function PixelShuffle:clearState() - nn.utils.clear(self, { - "_intermediateShape", - "_outShape", - "_shuffleIn", - "_shuffleOut", - }) - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/Power.lua b/contrib/lua-torch/nn/Power.lua deleted file mode 100644 index 771183c483..0000000000 --- a/contrib/lua-torch/nn/Power.lua +++ /dev/null @@ -1,22 +0,0 @@ -local Power, parent = torch.class('nn.Power','nn.Module') - -function Power:__init(p) - parent.__init(self) - self.pow = p - if not p then - error('nn.Power(power)') - end -end - -function Power:updateOutput(input) - self.output:resizeAs(input):copy(input) - self.output:pow(self.pow) - return self.output -end - -function Power:updateGradInput(input, gradOutput) - self.gradInput:resizeAs(input):copy(input) - self.gradInput:pow(self.pow - 1) - self.gradInput:cmul(gradOutput):mul(self.pow) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/PrintSize.lua b/contrib/lua-torch/nn/PrintSize.lua deleted file mode 100644 index d8dc91bff7..0000000000 --- a/contrib/lua-torch/nn/PrintSize.lua +++ /dev/null @@ -1,36 +0,0 @@ -local PrintSize, parent = torch.class('nn.PrintSize', 'nn.Module') - -function PrintSize:__init(prefix) - parent.__init(self) - self.prefix = prefix or "PrintSize" -end - -function PrintSize:updateOutput(input) - self.output = input - local size - if torch.type(input) == 'table' then - size = input - elseif torch.type(input) == 'nil' then - size = 'missing size' - else - size = input:size() - end - print(self.prefix..":input\n", size) - return self.output -end - - -function PrintSize:updateGradInput(input, gradOutput) - local size - if torch.type(gradOutput) == 'table' then - size = gradOutput - elseif torch.type(gradOutput) == 'nil' then - size = 'missing size' - else - size = gradOutput:size() - end - print(self.prefix..":gradOutput\n", size) - self.gradInput = gradOutput - return self.gradInput -end - diff --git a/contrib/lua-torch/nn/Profile.lua b/contrib/lua-torch/nn/Profile.lua deleted file mode 100644 index 36cd909cdc..0000000000 --- a/contrib/lua-torch/nn/Profile.lua +++ /dev/null @@ -1,55 +0,0 @@ -local ProfileModule, parent = torch.class("nn.Profile", "nn.Decorator") - -function ProfileModule:__init(module, print_interval, name) - parent.__init(self, module) - self.print_interval = print_interval or 100 - self.name = name or torch.type(module) - 
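-- (annotation) state kept on the decorator: the wrapped module, pass counters and summed wall-clock times, reported every print_interval passes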
self.module = module - self.numFwds = 0 - self.numBwds = 0 - self.summedFwdTime = 0 - self.summedBwdTime = 0 - self.timer = torch.Timer() -end - -function ProfileModule:updateOutput(input) - self.timer:reset() - self.output = self.module:updateOutput(input) - self.summedFwdTime = self.summedFwdTime + self.timer:time().real - self.numFwds = self.numFwds + 1 - if self.numFwds % self.print_interval == 0 then - print (string.format('%s took %.3f seconds for %d forward passes', - self.name, self.summedFwdTime, self.print_interval)) - self.numFwds = 0 - self.summedFwdTime = 0 - end - return self.output -end - -function ProfileModule:updateGradInput(input, gradOutput) - self.timer:reset() - self.gradInput = self.module:updateGradInput(input, gradOutput) - self.summedBwdTime = self.summedBwdTime + self.timer:time().real - self.numBwds = self.numBwds + 1 - if self.numBwds % self.print_interval == 0 then - print (string.format('%s took %.3f seconds for %d backward passes', - self.name, self.summedBwdTime, self.print_interval)) - self.numBwds = 0 - self.summedBwdTime = 0 - end - return self.gradInput -end - -local function makeTorchTimerSerializable() - -- The Timer object part of this class needs to be serializable - -- so that the layer can be saved, cloned, etc. We add a dummy - -- serialization of torch.Timer that just creates a new instance at read - local timerMetatable = getmetatable(torch.Timer()) - timerMetatable['__factory'] = torch.Timer - timerMetatable['write'] = function(object, file) end - timerMetatable['read'] = function(object, file, versionNumber) - return object - end -end - -makeTorchTimerSerializable() diff --git a/contrib/lua-torch/nn/README.md b/contrib/lua-torch/nn/README.md deleted file mode 100644 index 6efd60962f..0000000000 --- a/contrib/lua-torch/nn/README.md +++ /dev/null @@ -1,21 +0,0 @@ -[![Build Status](https://travis-ci.org/torch/nn.svg?branch=master)](https://travis-ci.org/torch/nn) - -# Neural Network Package # - -This package provides an easy and modular way to build and train simple or complex neural networks using [Torch](https://github.com/torch/torch7/blob/master/README.md): - * Modules are the bricks used to build neural networks. 
Each is itself a neural network, but they can be combined with other networks using containers to create complex neural networks:
-   * [Module](doc/module.md#nn.Module): abstract class inherited by all modules;
-   * [Containers](doc/containers.md#nn.Containers): composite and decorator classes like [`Sequential`](doc/containers.md#nn.Sequential), [`Parallel`](doc/containers.md#nn.Parallel), [`Concat`](doc/containers.md#nn.Concat) and [`NaN`](doc/containers.md#nn.NaN);
-   * [Transfer functions](doc/transfer.md#nn.transfer.dok): non-linear functions like [`Tanh`](doc/transfer.md#nn.Tanh) and [`Sigmoid`](doc/transfer.md#nn.Sigmoid);
-   * [Simple layers](doc/simple.md#nn.simplelayers.dok): like [`Linear`](doc/simple.md#nn.Linear), [`Mean`](doc/simple.md#nn.Mean), [`Max`](doc/simple.md#nn.Max) and [`Reshape`](doc/simple.md#nn.Reshape);
-   * [Table layers](doc/table.md#nn.TableLayers): layers for manipulating `table`s like [`SplitTable`](doc/table.md#nn.SplitTable), [`ConcatTable`](doc/table.md#nn.ConcatTable) and [`JoinTable`](doc/table.md#nn.JoinTable);
-   * [Convolution layers](doc/convolution.md#nn.convlayers.dok): [`Temporal`](doc/convolution.md#nn.TemporalModules), [`Spatial`](doc/convolution.md#nn.SpatialModules) and [`Volumetric`](doc/convolution.md#nn.VolumetricModules) convolutions;
- * Criterions compute the loss and its gradient for a given input and target:
-   * [Criterions](doc/criterion.md#nn.Criterions): a list of all criterions, including [`Criterion`](doc/criterion.md#nn.Criterion), the abstract class;
-   * [`MSECriterion`](doc/criterion.md#nn.MSECriterion): the Mean Squared Error criterion used for regression;
-   * [`ClassNLLCriterion`](doc/criterion.md#nn.ClassNLLCriterion): the Negative Log Likelihood criterion used for classification;
- * Additional documentation:
-   * [Overview](doc/overview.md#nn.overview.dok) of the package essentials including modules, containers and training;
-   * [Training](doc/training.md#nn.traningneuralnet.dok): how to train a neural network using [`StochasticGradient`](doc/training.md#nn.StochasticGradient);
-   * [Testing](doc/testing.md): how to test your modules;
-   * [Experimental Modules](https://github.com/clementfarabet/lua---nnx/blob/master/README.md): a package containing experimental modules and criteria.
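The pieces above compose naturally. A minimal end-to-end sketch (illustrative only; the sizes, data and 0.01 learning rate are made up):

    require 'nn'
    local net = nn.Sequential()
    net:add(nn.Linear(10, 5)):add(nn.Tanh()):add(nn.Linear(5, 1))
    local criterion = nn.MSECriterion()
    -- one manual gradient step
    local x, y = torch.randn(10), torch.Tensor{0.5}
    local loss = criterion:forward(net:forward(x), y)
    net:zeroGradParameters()
    net:backward(x, criterion:backward(net.output, y))
    net:updateParameters(0.01)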
diff --git a/contrib/lua-torch/nn/RReLU.lua b/contrib/lua-torch/nn/RReLU.lua deleted file mode 100644 index 843415f7e9..0000000000 --- a/contrib/lua-torch/nn/RReLU.lua +++ /dev/null @@ -1,50 +0,0 @@ -local ffi = require 'ffi' -local RReLU, parent = torch.class('nn.RReLU', 'nn.Module') - -function RReLU:__init(l, u, ip) - parent.__init(self) - self.lower = l or 1/8 - self.upper = u or 1/3 - assert(self.lower <= self.upper and self.lower >= 0 and self.upper >= 0) - self.noise = torch.Tensor() - self.train = true - self.inplace = ip or false -end - -function RReLU:updateOutput(input) - local gen = ffi.typeof('THGenerator**')(torch._gen)[0] - input.THNN.RReLU_updateOutput( - input:cdata(), - self.output:cdata(), - self.noise:cdata(), - self.lower, - self.upper, - self.train, - self.inplace, - gen - ) - return self.output -end - -function RReLU:updateGradInput(input, gradOutput) - input.THNN.RReLU_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.noise:cdata(), - self.lower, - self.upper, - self.train, - self.inplace - ) - return self.gradInput -end - -function RReLU:__tostring__() - return string.format('%s (l:%f, u:%f)', torch.type(self), self.lower, self.upper) -end - -function RReLU:clearState() - if self.noise then self.noise:set() end - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/ReLU.lua b/contrib/lua-torch/nn/ReLU.lua deleted file mode 100644 index a6eb271ee0..0000000000 --- a/contrib/lua-torch/nn/ReLU.lua +++ /dev/null @@ -1,5 +0,0 @@ -local ReLU, Parent = torch.class('nn.ReLU', 'nn.Threshold') - -function ReLU:__init(p) - Parent.__init(self,0,0,p) -end diff --git a/contrib/lua-torch/nn/ReLU6.lua b/contrib/lua-torch/nn/ReLU6.lua deleted file mode 100644 index 1cde00b463..0000000000 --- a/contrib/lua-torch/nn/ReLU6.lua +++ /dev/null @@ -1,32 +0,0 @@ -local ReLU6, parent = torch.class('nn.ReLU6', 'nn.Module') - -function ReLU6:__init(inplace) - parent.__init(self) - - if inplace == nil then - self.inplace = false - else - self.inplace = inplace - end - - if (inplace and type(inplace) ~= 'boolean') then - error('in-place flag must be boolean') - end -end - -function ReLU6:updateOutput(input) - input.THNN.HardTanh_updateOutput( - input:cdata(), - self.output:cdata(), - 0, 6, self.inplace) - return self.output -end - -function ReLU6:updateGradInput(input, gradOutput) - input.THNN.HardTanh_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - 0, 6, self.inplace) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/Replicate.lua b/contrib/lua-torch/nn/Replicate.lua deleted file mode 100644 index c7dedd7672..0000000000 --- a/contrib/lua-torch/nn/Replicate.lua +++ /dev/null @@ -1,57 +0,0 @@ -local Replicate, parent = torch.class('nn.Replicate','nn.Module') - -function Replicate:__init(nf, dim, ndim) - parent.__init(self) - self.nfeatures = nf - self.dim = dim or 1 - self.ndim = ndim - assert(self.dim > 0, "Can only replicate across positive integer dimensions.") -end - -function Replicate:updateOutput(input) - self.dim = self.dim or 1 --backwards compatible - assert( - self.dim <= input:dim()+1, - "Not enough input dimensions to replicate along dimension " .. - tostring(self.dim) .. 
".") - local batchOffset = self.ndim and input:dim() > self.ndim and 1 or 0 - local rdim = self.dim + batchOffset - local sz = torch.LongStorage(input:dim()+1) - sz[rdim] = self.nfeatures - for i = 1,input:dim() do - local offset = 0 - if i >= rdim then - offset = 1 - end - sz[i+offset] = input:size(i) - end - local st = torch.LongStorage(input:dim()+1) - st[rdim] = 0 - for i = 1,input:dim() do - local offset = 0 - if i >= rdim then - offset = 1 - end - st[i+offset] = input:stride(i) - end - self.output:set(input:storage(),input:storageOffset(),sz,st) - return self.output -end - -function Replicate:updateGradInput(input, gradOutput) - self.gradInput:resizeAs(input):zero() - local batchOffset = self.ndim and input:dim() > self.ndim and 1 or 0 - local rdim = self.dim + batchOffset - local sz = torch.LongStorage(input:dim()+1) - sz[rdim] = 1 - for i = 1,input:dim() do - local offset = 0 - if i >= rdim then - offset = 1 - end - sz[i+offset] = input:size(i) - end - local gradInput = self.gradInput:view(sz) - gradInput:sum(gradOutput, rdim) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/Reshape.lua b/contrib/lua-torch/nn/Reshape.lua deleted file mode 100644 index d508369fab..0000000000 --- a/contrib/lua-torch/nn/Reshape.lua +++ /dev/null @@ -1,72 +0,0 @@ -local Reshape, parent = torch.class('nn.Reshape', 'nn.Module') - -function Reshape:__init(...) - parent.__init(self) - local arg = {...} - - self.size = torch.LongStorage() - self.batchsize = torch.LongStorage() - if torch.type(arg[#arg]) == 'boolean' then - self.batchMode = arg[#arg] - table.remove(arg, #arg) - end - local n = #arg - if n == 1 and torch.typename(arg[1]) == 'torch.LongStorage' then - self.size:resize(#arg[1]):copy(arg[1]) - else - self.size:resize(n) - for i=1,n do - self.size[i] = arg[i] - end - end - - self.nelement = 1 - self.batchsize:resize(#self.size+1) - for i=1,#self.size do - self.nelement = self.nelement * self.size[i] - self.batchsize[i+1] = self.size[i] - end -end - -function Reshape:updateOutput(input) - if not input:isContiguous() then - self._input = self._input or input.new() - self._input:resizeAs(input) - self._input:copy(input) - input = self._input - end - - if (self.batchMode == false) or ( - (self.batchMode == nil) and - (input:nElement() == self.nelement and input:size(1) ~= 1) - ) then - self.output:view(input, self.size) - else - self.batchsize[1] = input:size(1) - self.output:view(input, self.batchsize) - end - return self.output -end - -function Reshape:updateGradInput(input, gradOutput) - if not gradOutput:isContiguous() then - self._gradOutput = self._gradOutput or gradOutput.new() - self._gradOutput:resizeAs(gradOutput) - self._gradOutput:copy(gradOutput) - gradOutput = self._gradOutput - end - - self.gradInput:viewAs(gradOutput, input) - return self.gradInput -end - - -function Reshape:__tostring__() - return torch.type(self) .. '(' .. - table.concat(self.size:totable(), 'x') .. 
')' -end - -function Reshape:clearState() - nn.utils.clear(self, '_input', '_gradOutput') - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/Select.lua b/contrib/lua-torch/nn/Select.lua deleted file mode 100644 index be87c64659..0000000000 --- a/contrib/lua-torch/nn/Select.lua +++ /dev/null @@ -1,24 +0,0 @@ -local Select, parent = torch.class('nn.Select', 'nn.Module') - -function Select:__init(dimension,index) - parent.__init(self) - self.dimension = dimension - self.index = index -end - -function Select:updateOutput(input) - local dim = self.dimension < 0 and input:dim() + self.dimension + 1 or self.dimension - local index = self.index < 0 and input:size(dim) + self.index + 1 or self.index - local output = input:select(dim, index); - self.output:resizeAs(output) - return self.output:copy(output) -end - -function Select:updateGradInput(input, gradOutput) - local dim = self.dimension < 0 and input:dim() + self.dimension + 1 or self.dimension - local index = self.index < 0 and input:size(dim) + self.index + 1 or self.index - self.gradInput:resizeAs(input) - self.gradInput:zero() - self.gradInput:select(dim,index):copy(gradOutput) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SelectTable.lua b/contrib/lua-torch/nn/SelectTable.lua deleted file mode 100644 index ef26f3507b..0000000000 --- a/contrib/lua-torch/nn/SelectTable.lua +++ /dev/null @@ -1,71 +0,0 @@ -local SelectTable, parent = torch.class('nn.SelectTable', 'nn.Module') - -function SelectTable:__init(index) - parent.__init(self) - self.index = index - self.gradInput = {} -end - -function SelectTable:updateOutput(input) - - -- handle negative indices - local index = self.index - if type(index) == "number" then - index = index < 0 and #input + index + 1 or index - end - - assert(input[index], "index does not exist in the input table") - self.output = input[index] - - return self.output -end - -local function zeroTableCopy(t1, t2) - for k, v in pairs(t2) do - if (torch.type(v) == "table") then - t1[k] = zeroTableCopy(t1[k] or {}, t2[k]) - elseif torch.isTensor(v) then - if not t1[k] then - t1[k] = v:clone():zero() - else - t1[k]:resizeAs(v) - t1[k]:zero() - end - else - t1[k] = nil - end - end - for k, v in pairs(t1) do - if not t2[k] then - t1[k] = nil - end - end - return t1 -end - -function SelectTable:updateGradInput(input, gradOutput) - -- make gradInput a zeroed copy of input - zeroTableCopy(self.gradInput, input) - -- handle negative indices - local index = self.index - if type(index) == "number" then - index = index < 0 and #input + index + 1 or index - end - -- copy into gradInput[index] (necessary for variable sized inputs) - assert(self.gradInput[index]) - nn.utils.recursiveCopy(self.gradInput[index], gradOutput) - - return self.gradInput -end - -function SelectTable:type(type, tensorCache) - self.gradInput = {} - self.output = {} - return parent.type(self, type, tensorCache) -end - -function SelectTable:__tostring__() - return torch.type(self) .. '(' .. self.index .. 
')' -end - -SelectTable.clearState = nn.Identity.clearState diff --git a/contrib/lua-torch/nn/Sequential.lua b/contrib/lua-torch/nn/Sequential.lua deleted file mode 100644 index 22b0886b82..0000000000 --- a/contrib/lua-torch/nn/Sequential.lua +++ /dev/null @@ -1,122 +0,0 @@ -local Sequential, _ = torch.class('nn.Sequential', 'nn.Container') - -function Sequential:__len() - return #self.modules -end - -function Sequential:add(module) - if #self.modules == 0 then - self.gradInput = module.gradInput - end - table.insert(self.modules, module) - self.output = module.output - return self -end - -function Sequential:insert(module, index) - index = index or (#self.modules + 1) - if index > (#self.modules + 1) or index < 1 then - error"index should be contiguous to existing modules" - end - table.insert(self.modules, index, module) - self.output = self.modules[#self.modules].output - self.gradInput = self.modules[1].gradInput -end - -function Sequential:remove(index) - index = index or #self.modules - if index > #self.modules or index < 1 then - error"index out of range" - end - table.remove(self.modules, index) - if #self.modules > 0 then - self.output = self.modules[#self.modules].output - self.gradInput = self.modules[1].gradInput - else - self.output = torch.Tensor() - self.gradInput = torch.Tensor() - end -end - -function Sequential:updateOutput(input) - local currentOutput = input - for i=1,#self.modules do - currentOutput = self:rethrowErrors(self.modules[i], i, 'updateOutput', currentOutput) - end - self.output = currentOutput - return currentOutput -end - -function Sequential:updateGradInput(input, gradOutput) - local currentGradOutput = gradOutput - local currentModule = self.modules[#self.modules] - for i=#self.modules-1,1,-1 do - local previousModule = self.modules[i] - currentGradOutput = self:rethrowErrors(currentModule, i+1, 'updateGradInput', previousModule.output, currentGradOutput) - currentModule = previousModule - end - currentGradOutput = self:rethrowErrors(currentModule, 1, 'updateGradInput', input, currentGradOutput) - self.gradInput = currentGradOutput - return currentGradOutput -end - -function Sequential:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - - local currentGradOutput = gradOutput - local currentModule = self.modules[#self.modules] - for i=#self.modules-1,1,-1 do - local previousModule = self.modules[i] - self:rethrowErrors(currentModule, i+1, 'accGradParameters', previousModule.output, currentGradOutput, scale) - currentGradOutput = currentModule.gradInput - currentModule = previousModule - end - - self:rethrowErrors(currentModule, 1, 'accGradParameters', input, currentGradOutput, scale) -end - -function Sequential:backward(input, gradOutput, scale) - scale = scale or 1 - local currentGradOutput = gradOutput - local currentModule = self.modules[#self.modules] - for i=#self.modules-1,1,-1 do - local previousModule = self.modules[i] - currentGradOutput = self:rethrowErrors(currentModule, i+1, 'backward', previousModule.output, currentGradOutput, scale) - currentModule.gradInput = currentGradOutput - currentModule = previousModule - end - currentGradOutput = self:rethrowErrors(currentModule, 1, 'backward', input, currentGradOutput, scale) - self.gradInput = currentGradOutput - return currentGradOutput -end - -function Sequential:accUpdateGradParameters(input, gradOutput, lr) - local currentGradOutput = gradOutput - local currentModule = self.modules[#self.modules] - for i=#self.modules-1,1,-1 do - local previousModule = self.modules[i] - 
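-- (annotation) walking backwards through the chain: each module consumes its predecessor's output and the gradient arriving from its successor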
self:rethrowErrors(currentModule, i+1, 'accUpdateGradParameters', previousModule.output, currentGradOutput, lr) - currentGradOutput = currentModule.gradInput - currentModule = previousModule - end - - self:rethrowErrors(currentModule, 1, 'accUpdateGradParameters', input, currentGradOutput, lr) -end - - -function Sequential:__tostring__() - local tab = ' ' - local line = '\n' - local next = ' -> ' - local str = 'nn.Sequential' - str = str .. ' {' .. line .. tab .. '[input' - for i=1,#self.modules do - str = str .. next .. '(' .. i .. ')' - end - str = str .. next .. 'output]' - for i=1,#self.modules do - str = str .. line .. tab .. '(' .. i .. '): ' .. tostring(self.modules[i]):gsub(line, line .. tab) - end - str = str .. line .. '}' - return str -end diff --git a/contrib/lua-torch/nn/Sigmoid.lua b/contrib/lua-torch/nn/Sigmoid.lua deleted file mode 100644 index 0126f6f8f8..0000000000 --- a/contrib/lua-torch/nn/Sigmoid.lua +++ /dev/null @@ -1,19 +0,0 @@ -local Sigmoid = torch.class('nn.Sigmoid', 'nn.Module') - -function Sigmoid:updateOutput(input) - input.THNN.Sigmoid_updateOutput( - input:cdata(), - self.output:cdata() - ) - return self.output -end - -function Sigmoid:updateGradInput(input, gradOutput) - input.THNN.Sigmoid_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.output:cdata() - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SmoothL1Criterion.lua b/contrib/lua-torch/nn/SmoothL1Criterion.lua deleted file mode 100644 index be636a94cd..0000000000 --- a/contrib/lua-torch/nn/SmoothL1Criterion.lua +++ /dev/null @@ -1,32 +0,0 @@ -local SmoothL1Criterion, parent = torch.class('nn.SmoothL1Criterion', 'nn.Criterion') - -function SmoothL1Criterion:__init(sizeAverage) - parent.__init(self) - if sizeAverage ~= nil then - self.sizeAverage = sizeAverage - else - self.sizeAverage = true - end -end - -function SmoothL1Criterion:updateOutput(input, target) - self.output_tensor = self.output_tensor or input.new(1) - input.THNN.SmoothL1Criterion_updateOutput( - input:cdata(), - target:cdata(), - self.output_tensor:cdata(), - self.sizeAverage - ) - self.output = self.output_tensor[1] - return self.output -end - -function SmoothL1Criterion:updateGradInput(input, target) - input.THNN.SmoothL1Criterion_updateGradInput( - input:cdata(), - target:cdata(), - self.gradInput:cdata(), - self.sizeAverage - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SoftMarginCriterion.lua b/contrib/lua-torch/nn/SoftMarginCriterion.lua deleted file mode 100644 index 96ccda8a45..0000000000 --- a/contrib/lua-torch/nn/SoftMarginCriterion.lua +++ /dev/null @@ -1,24 +0,0 @@ -local SoftMarginCriterion, parent = torch.class('nn.SoftMarginCriterion', 'nn.Criterion') - -function SoftMarginCriterion:__init() - parent.__init(self) - self.sizeAverage = true -end - -function SoftMarginCriterion:updateOutput(input, target) - self.output_tensor = self.output_tensor or input.new(1) - input.THNN.SoftMarginCriterion_updateOutput( - input:cdata(), target:cdata(), - self.output_tensor:cdata(), - self.sizeAverage) - self.output = self.output_tensor[1] - return self.output -end - -function SoftMarginCriterion:updateGradInput(input, target) - input.THNN.SoftMarginCriterion_updateGradInput( - input:cdata(), target:cdata(), - self.gradInput:cdata(), - self.sizeAverage) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SoftMax.lua b/contrib/lua-torch/nn/SoftMax.lua deleted file mode 100644 index 23a444cf6a..0000000000 --- a/contrib/lua-torch/nn/SoftMax.lua +++ 
/dev/null @@ -1,19 +0,0 @@ -local SoftMax, _ = torch.class('nn.SoftMax', 'nn.Module') - -function SoftMax:updateOutput(input) - input.THNN.SoftMax_updateOutput( - input:cdata(), - self.output:cdata() - ) - return self.output -end - -function SoftMax:updateGradInput(input, gradOutput) - input.THNN.SoftMax_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.output:cdata() - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SoftMin.lua b/contrib/lua-torch/nn/SoftMin.lua deleted file mode 100644 index 7da2a65895..0000000000 --- a/contrib/lua-torch/nn/SoftMin.lua +++ /dev/null @@ -1,31 +0,0 @@ -local SoftMin, parent = torch.class('nn.SoftMin', 'nn.Module') - -function SoftMin:updateOutput(input) - self.mininput = self.mininput or input.new() - self.mininput:resizeAs(input):copy(input):mul(-1) - input.THNN.SoftMax_updateOutput( - self.mininput:cdata(), - self.output:cdata() - ) - return self.output -end - -function SoftMin:updateGradInput(input, gradOutput) - self.mininput = self.mininput or input.new() - self.mininput:resizeAs(input):copy(input):mul(-1) - - input.THNN.SoftMax_updateGradInput( - self.mininput:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.output:cdata() - ) - - self.gradInput:mul(-1) - return self.gradInput -end - -function SoftMin:clearState() - if self.mininput then self.mininput:set() end - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/SoftPlus.lua b/contrib/lua-torch/nn/SoftPlus.lua deleted file mode 100644 index f77b253800..0000000000 --- a/contrib/lua-torch/nn/SoftPlus.lua +++ /dev/null @@ -1,35 +0,0 @@ -local SoftPlus, parent = torch.class('nn.SoftPlus', 'nn.Module') - -function SoftPlus:__init(beta) - parent.__init(self) - self.beta = beta or 1 -- Beta controls sharpness of transfer function - self.threshold = 20 -- Avoid floating point issues with exp(x), x>20 -end - -function SoftPlus:updateOutput(input) - -- f(x) = 1/beta * log(1 + exp(beta * x)) - input.THNN.SoftPlus_updateOutput( - input:cdata(), - self.output:cdata(), - self.beta, - self.threshold - ) - return self.output -end - -function SoftPlus:updateGradInput(input, gradOutput) - -- d/dx[log(1+exp(k*x))/k] = exp(kx) / (exp(kx) + 1) - -- SINCE - -- y = (1/k)*log(1+exp(k*x)) --> x = (1/k)*log(exp(k*y)-1) - -- THEREFORE: - -- d/dx(f(x)) = (exp(k*y) - 1) / exp(k*y) - input.THNN.SoftPlus_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.output:cdata(), - self.beta, - self.threshold - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SoftShrink.lua b/contrib/lua-torch/nn/SoftShrink.lua deleted file mode 100644 index 67af15a980..0000000000 --- a/contrib/lua-torch/nn/SoftShrink.lua +++ /dev/null @@ -1,25 +0,0 @@ -local SoftShrink, parent = torch.class('nn.SoftShrink', 'nn.Module') - -function SoftShrink:__init(lam) - parent.__init(self) - self.lambda = lam or 0.5 -end - -function SoftShrink:updateOutput(input) - input.THNN.SoftShrink_updateOutput( - input:cdata(), - self.output:cdata(), - self.lambda - ) - return self.output -end - -function SoftShrink:updateGradInput(input, gradOutput) - input.THNN.SoftShrink_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.lambda - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SoftSign.lua b/contrib/lua-torch/nn/SoftSign.lua deleted file mode 100644 index ee72011f10..0000000000 --- a/contrib/lua-torch/nn/SoftSign.lua +++ /dev/null @@ -1,20 +0,0 @@ -local SoftSign, parent = 
torch.class('nn.SoftSign', 'nn.Module') - -function SoftSign:updateOutput(input) - self.temp = self.temp or input.new() - self.temp:resizeAs(input):copy(input):abs():add(1) - self.output:resizeAs(input):copy(input):cdiv(self.temp) - return self.output -end - -function SoftSign:updateGradInput(input, gradOutput) - self.tempgrad = self.tempgrad or input.new() - self.tempgrad:resizeAs(self.output):copy(input):abs():add(1):cmul(self.tempgrad) - self.gradInput:resizeAs(input):copy(gradOutput):cdiv(self.tempgrad) - return self.gradInput -end - -function SoftSign:clearState() - nn.utils.clear(self, 'temp', 'tempgrad') - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/SparseJacobian.lua b/contrib/lua-torch/nn/SparseJacobian.lua deleted file mode 100644 index 7f4c024444..0000000000 --- a/contrib/lua-torch/nn/SparseJacobian.lua +++ /dev/null @@ -1,277 +0,0 @@ -nn.SparseJacobian = {} - -function nn.SparseJacobian.backward (module, input, param, dparam) - local doparam = 0 - if param then - doparam = 1 - end - - -- output deriv - module:forward(input) - local dout = module.output.new():resizeAs(module.output) - -- 1D view - local sdout = module.output.new(dout:storage(), 1, dout:nElement()) - -- jacobian matrix to calculate - local jacobian - if doparam == 1 then - jacobian = torch.Tensor(param:nElement(), dout:nElement()):zero() - else - jacobian = torch.Tensor(input:size(1), dout:nElement()):zero() - end - - for i=1,sdout:nElement() do - dout:zero() - sdout[i] = 1 - module:zeroGradParameters() - local din = module:updateGradInput(input, dout) - module:accGradParameters(input, dout) - if doparam == 1 then - jacobian:select(2,i):copy(dparam) - else - jacobian:select(2,i):copy(din:select(2,2)) - end - end - - return jacobian -end - - -function nn.SparseJacobian.backwardUpdate (module, input, param) - - -- output deriv - module:forward(input) - local dout = module.output.new():resizeAs(module.output) - -- 1D view - local sdout = module.output.new(dout:storage(),1,dout:nElement()) - -- jacobian matrix to calculate - local jacobian = torch.Tensor(param:nElement(),dout:nElement()):zero() - - -- original param - local params = module:parameters() - local origparams = {} - for j=1,#params do - table.insert(origparams, params[j]:clone()) - end - - for i=1,sdout:nElement() do - -- Reset parameters - for j=1,#params do - params[j]:copy(origparams[j]) - end - dout:zero() - sdout[i] = 1 - module:zeroGradParameters() - module:updateGradInput(input, dout) - module:accUpdateGradParameters(input, dout, 1) - jacobian:select(2,i):copy(param) - end - - for j=1,#params do - params[j]:copy(origparams[j]) - end - - return jacobian -end - -function nn.SparseJacobian.forward(module, input, param) - local doparam = 0 - if param then - doparam = 1 - end - param = param or input - - -- perturbation amount - local small = 1e-6 - -- 1D view of input - --local tst = param:storage() - local sin - if doparam == 1 then - sin = param.new(param):resize(param:nElement()) - else - sin = input.new(input):select(2,2) - end - - local out = module:forward(input) - -- jacobian matrix to calculate - local jacobian - if doparam == 1 then - jacobian = torch.Tensor():resize(param:nElement(), - out:nElement()) - else - jacobian = torch.Tensor():resize(input:size(1), - out:nElement()) - end - - local outa = torch.Tensor(jacobian:size(2)) - local outb = torch.Tensor(jacobian:size(2)) - - for i=1,sin:nElement() do - sin[i] = sin[i] - small - outa:copy(module:forward(input)) - sin[i] = sin[i] + 2*small - 
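-- (annotation) second evaluation at +small; the central difference (outb - outa)/(2*small) below approximates one row of the Jacobian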
outb:copy(module:forward(input)) - sin[i] = sin[i] - small - - outb:add(-1,outa):div(2*small) - jacobian:select(1,i):copy(outb) - end - - return jacobian -end - -function nn.SparseJacobian.forwardUpdate(module, input, param) - -- perturbation amount - local small = 1e-6 - -- 1D view of input - --local tst = param:storage() - local sin = param.new(param):resize(param:nElement())--param.new(tst,1,tst:size()) - -- jacobian matrix to calculate - local jacobian = torch.Tensor():resize(param:nElement(),module:forward(input):nElement()) - - local outa = torch.Tensor(jacobian:size(2)) - local outb = torch.Tensor(jacobian:size(2)) - - for i=1,sin:nElement() do - sin[i] = sin[i] - small - outa:copy(module:forward(input)) - sin[i] = sin[i] + 2*small - outb:copy(module:forward(input)) - sin[i] = sin[i] - small - - outb:add(-1,outa):div(2*small) - jacobian:select(1,i):copy(outb) - jacobian:select(1,i):mul(-1) - jacobian:select(1,i):add(sin[i]) - end - return jacobian -end - -function nn.SparseJacobian.testJacobian (module, input, minval, maxval) - minval = minval or -2 - maxval = maxval or 2 - local inrange = maxval - minval - input:select(2,2):copy(torch.rand(input:size(1)):mul(inrange):add(minval)) - local jac_fprop = nn.SparseJacobian.forward(module,input) - local jac_bprop = nn.SparseJacobian.backward(module,input) - local error = jac_fprop-jac_bprop - return error:abs():max() -end - -function nn.SparseJacobian.testJacobianParameters (module, input, param, dparam, minval, maxval) - minval = minval or -2 - maxval = maxval or 2 - local inrange = maxval - minval - input:select(2,2):copy(torch.rand(input:size(1)):mul(inrange):add(minval)) - param:copy(torch.rand(param:nElement()):mul(inrange):add(minval)) - local jac_bprop = nn.SparseJacobian.backward(module, input, param, dparam) - local jac_fprop = nn.SparseJacobian.forward(module, input, param) - local error = jac_fprop - jac_bprop - return error:abs():max() -end - -function nn.SparseJacobian.testJacobianUpdateParameters (module, input, param, minval, maxval) - minval = minval or -2 - maxval = maxval or 2 - local inrange = maxval - minval - input:select(2,2):copy(torch.rand(input:size(1)):mul(inrange):add(minval)) - param:copy(torch.rand(param:nElement()):mul(inrange):add(minval)) - local params_bprop = nn.SparseJacobian.backwardUpdate(module, input, param) - local params_fprop = nn.SparseJacobian.forwardUpdate(module, input, param) - - local error = params_fprop - params_bprop - return error:abs():max() -end - -function nn.SparseJacobian.testIO(module,input, minval, maxval) - minval = minval or -2 - maxval = maxval or 2 - local inrange = maxval - minval - - -- run module - module:forward(input) - local go = module.output:clone():copy(torch.rand(module.output:nElement()):mul(inrange):add(minval)) - module:zeroGradParameters() - module:updateGradInput(input,go) - module:accGradParameters(input,go) - - local fo = module.output:clone() - local bo = module.gradInput:clone() - - -- write module - local f = torch.DiskFile('tmp.bin','w'):binary() - f:writeObject(module) - f:close() - -- read module - local m = torch.DiskFile('tmp.bin'):binary():readObject() - m:forward(input) - m:zeroGradParameters() - m:updateGradInput(input,go) - m:accGradParameters(input,go) - -- cleanup - os.remove('tmp.bin') - - local fo2 = m.output:clone() - local bo2 = m.gradInput:clone() - - local errf = fo - fo2 - local errb = bo - bo2 - return errf:abs():max(), errb:abs():max() -end - -function nn.SparseJacobian.testAllUpdate(module, input, weight, gradWeight) - local gradOutput 
- local lr = torch.uniform(0.1, 1) - local errors = {} - - -- accGradParameters - local maccgp = module:clone() - local weightc = maccgp[weight]:clone() - maccgp:forward(input) - gradOutput = torch.rand(maccgp.output:size()) - maccgp:zeroGradParameters() - maccgp:updateGradInput(input, gradOutput) - maccgp:accGradParameters(input, gradOutput) - maccgp:updateParameters(lr) - errors["accGradParameters"] = (weightc-maccgp[gradWeight]*lr-maccgp[weight]):norm() - - -- accUpdateGradParameters - local maccugp = module:clone() - maccugp:forward(input) - maccugp:updateGradInput(input, gradOutput) - maccugp:accUpdateGradParameters(input, gradOutput, lr) - errors["accUpdateGradParameters"] = (maccugp[weight]-maccgp[weight]):norm() - - -- shared, accGradParameters - local macsh1 = module:clone() - local macsh2 = module:clone() - macsh2:share(macsh1, weight) - macsh1:forward(input) - macsh2:forward(input) - macsh1:zeroGradParameters() - macsh2:zeroGradParameters() - macsh1:updateGradInput(input, gradOutput) - macsh2:updateGradInput(input, gradOutput) - macsh1:accGradParameters(input, gradOutput) - macsh2:accGradParameters(input, gradOutput) - macsh1:updateParameters(lr) - macsh2:updateParameters(lr) - local err = (weightc-maccgp[gradWeight]*(lr*2)-macsh1[weight]):norm() - err = err + (weightc-maccgp[gradWeight]*(lr*2)-macsh2[weight]):norm() - errors["accGradParameters [shared]"] = err - - -- shared, accUpdateGradParameters - local macshu1 = module:clone() - local macshu2 = module:clone() - macshu2:share(macshu1, weight) - macshu1:forward(input) - macshu2:forward(input) - macshu1:updateGradInput(input, gradOutput) - macshu2:updateGradInput(input, gradOutput) - macshu1:accUpdateGradParameters(input, gradOutput, lr) - macshu2:accUpdateGradParameters(input, gradOutput, lr) - err = (weightc-maccgp[gradWeight]*(lr*2)-macshu1[weight]):norm() - err = err + (weightc-maccgp[gradWeight]*(lr*2)-macshu2[weight]):norm() - errors["accUpdateGradParameters [shared]"] = err - - return errors -end diff --git a/contrib/lua-torch/nn/SparseLinear.lua b/contrib/lua-torch/nn/SparseLinear.lua deleted file mode 100644 index 9a50c69122..0000000000 --- a/contrib/lua-torch/nn/SparseLinear.lua +++ /dev/null @@ -1,242 +0,0 @@ -local THNN = require 'nn.THNN' -local SparseLinear, parent = torch.class('nn.SparseLinear', 'nn.Module') - -local NO_LAST_INPUT = 0 -local ONE_LAST_INPUT = 1 -local ACC_MULTIPLE_TIMES = 2 - -function SparseLinear:__init(inputSize, outputSize, doGradInput) - parent.__init(self) - - self.weightDecay = 0 - self.doGradInput = doGradInput or false - self.weight = torch.Tensor(outputSize, inputSize):zero() - self.bias = torch.Tensor(outputSize):zero() - self.gradWeight = torch.Tensor(outputSize, inputSize):zero() - self.gradBias = torch.Tensor(outputSize):zero() - - assert(type(self.doGradInput) == type(true)) - - self.lastInput = nil - self.sparseUpdate = NO_LAST_INPUT - self.formatted_input = nil - - -- state - self.gradInput = {} - self.output:resize(outputSize) - - self:reset() -end - -function SparseLinear:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1./math.sqrt(self.weight:size(2)) - end - self.weight:uniform(-stdv, stdv) - self.bias:uniform(-stdv, stdv):mul(0.000001) -end - -function SparseLinear:reshapeInput(input) - if type(input) == 'table' then - return input, true, false - else - if input:dim() == 2 then - return {input}, false, false - else - return input, true, true - end - end -end - -function SparseLinear:updateOutput(input) - if self.sparseUpdate == ONE_LAST_INPUT then - 
self.sparseUpdate = ACC_MULTIPLE_TIMES - end - local input, batchMode, legacyMode = self:reshapeInput(input) - self.legacyMode = legacyMode - - if legacyMode then - input.THNN.SparseLinear_legacyUpdateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - self.bias:cdata() - ) - else - local nbatches = #input - if nbatches == 0 then - self.output:copy(self.bias) - return self.output - end - - local size = 0 - local marker = 1 - self.formatted_input = self.formatted_input or input[1].new() - - for i,v in ipairs(input) do size = size + input[i]:size(1) end - self.formatted_input:resize(size, 3) - for i,v in ipairs(input) do - local buf = self.formatted_input:narrow(1, marker, input[i]:size(1)) - buf:narrow(2,2,2):copy(input[i]) - buf:select(2,1):fill(i) - marker = marker + input[i]:size(1) - end - - self.output:resize(nbatches, self.weight:size(1)) - input[1].THNN.SparseLinear_updateOutput( - self.formatted_input:cdata(), - self.output:cdata(), - self.weight:cdata(), - self.bias:cdata() - ) - - -- fix output size for batchSize = 1 - if not batchMode then - self.output = self.output[1] - end - end - - return self.output -end - -function SparseLinear:accGradParameters(input, gradOutput, scale) - local input, batchMode, legacyMode = self:reshapeInput(input) - self.legacyMode = legacyMode - self.lastInput = self.lastInput or gradOutput.new() - if self.sparseUpdate == NO_LAST_INPUT then - local v = self.formatted_input - if self.legacyMode then v = input end - self.lastInput:resizeAs(v):copy(v) - self.sparseUpdate = ONE_LAST_INPUT - elseif self.sparseUpdate == ONE_LAST_INPUT then - self.sparseUpdate = ACC_MULTIPLE_TIMES - end - - if legacyMode then - input.THNN.SparseLinear_legacyAccGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - self.gradBias:cdata(), - self.weight:cdata(), - self.bias:cdata(), - self.weightDecay or 0, - scale or 1 - ) - else - if not batchMode then - gradOutput:resize(1, gradOutput:size(1)) - end - - local rows = self.formatted_input:select(2, 1) - local cols = self.formatted_input:select(2, 2) - local sortinds = cols * gradOutput:size(1) + rows - local _, inds = sortinds:sort(1, false) - local newinput = self.formatted_input:index(1, inds) - input[1].THNN.SparseLinear_accGradParameters( - newinput:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - self.gradBias:cdata(), - self.weight:cdata(), - self.bias:cdata(), - self.weightDecay or 0, - scale or 1 - ) - end -end - -function SparseLinear:updateGradInput(input, gradOutput) - if self.legacyMode then - if type(self.gradInput) ~= type(gradOutput) then self.gradInput = gradOutput.new() end - self.gradInput:resizeAs(input) - else - self.gradInput = {} - end - if self.doGradInput then - -- GradInput should be dense anyway - local gi - local batchMode = true - if gradOutput:dim() == 1 then - gi = self.weight:t()*gradOutput - batchMode = false - elseif gradOutput:dim() == 2 then - gi = gradOutput*self.weight - end - local ini = self.weight:size(2) - - if self.legacyMode then - local batches = self.gradInput:size(1) - self.gradInput:resize(batches, ini, 2) - self.gradInput:select(3,1):copy(torch.repeatTensor(torch.range(1, ini), batches, 1)) - self.gradInput:select(3,2):copy(gi) - else - local indicies = torch.range(1, ini) - if not batchMode then gi:resize(1, ini) end - for i = 1,gi:size(1) do - self.gradInput[i] = gradOutput.new(ini, 2) - self.gradInput[i]:select(2, 2):copy(gi[i]) - self.gradInput[i]:select(2, 1):range(1, ini) - end - end - end - return self.gradInput 
-end - --- These functions do sparse updates / zeros. However, if we accumulated --- gradients multiple times, we can't depend on the last input to do sparse --- updates. -function SparseLinear:updateParameters(learningRate) - if self.lastInput and self.sparseUpdate == ONE_LAST_INPUT then - if self.legacyMode then - self.lastInput.THNN.SparseLinear_legacyUpdateParameters( - self.weight:cdata(), - self.bias:cdata(), - self.gradWeight:cdata(), - self.gradBias:cdata(), - self.lastInput:cdata(), - learningRate - ) - else - self.lastInput.THNN.SparseLinear_updateParameters( - self.weight:cdata(), - self.bias:cdata(), - self.gradWeight:cdata(), - self.gradBias:cdata(), - self.lastInput:cdata(), - learningRate - ) - end - else - parent.updateParameters(self, learningRate) - end -end - -function SparseLinear:zeroGradParameters() - if self.lastInput and self.sparseUpdate == ONE_LAST_INPUT then - if self.legacyMode then - self.lastInput.THNN.SparseLinear_legacyZeroGradParameters( - self.gradWeight:cdata(), - self.gradBias:cdata(), - self.lastInput:cdata() - ) - else - self.lastInput.THNN.SparseLinear_zeroGradParameters( - self.gradWeight:cdata(), - self.gradBias:cdata(), - self.lastInput:cdata() - ) - end - else - parent.zeroGradParameters(self) - end - self.sparseUpdate = NO_LAST_INPUT -end - -function SparseLinear:clearState() - if self.lastInput then self.lastInput:set() end - input.THNN.SparseLinear_cudaClearState() - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/SpatialAdaptiveAveragePooling.lua b/contrib/lua-torch/nn/SpatialAdaptiveAveragePooling.lua deleted file mode 100644 index 2e223580a4..0000000000 --- a/contrib/lua-torch/nn/SpatialAdaptiveAveragePooling.lua +++ /dev/null @@ -1,35 +0,0 @@ -local SpatialAdaptiveAveragePooling, parent = torch.class('nn.SpatialAdaptiveAveragePooling', 'nn.Module') - -function SpatialAdaptiveAveragePooling:__init(W, H) - parent.__init(self) - - self.W = W - self.H = H -end - -function SpatialAdaptiveAveragePooling:updateOutput(input) - input.THNN.SpatialAdaptiveAveragePooling_updateOutput( - input:cdata(), - self.output:cdata(), - self.W, self.H - ) - return self.output -end - -function SpatialAdaptiveAveragePooling:updateGradInput(input, gradOutput) - input.THNN.SpatialAdaptiveAveragePooling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata() - ) - return self.gradInput -end - --- for backward compat -function SpatialAdaptiveAveragePooling:empty() - self:clearState() -end - -function SpatialAdaptiveAveragePooling:clearState() - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/SpatialAdaptiveMaxPooling.lua b/contrib/lua-torch/nn/SpatialAdaptiveMaxPooling.lua deleted file mode 100644 index b78261c3df..0000000000 --- a/contrib/lua-torch/nn/SpatialAdaptiveMaxPooling.lua +++ /dev/null @@ -1,46 +0,0 @@ -local SpatialAdaptiveMaxPooling, parent = torch.class('nn.SpatialAdaptiveMaxPooling', 'nn.Module') - -function SpatialAdaptiveMaxPooling:__init(W, H) - parent.__init(self) - - self.W = W - self.H = H -end - -function SpatialAdaptiveMaxPooling:updateOutput(input) - self.indices = self.indices or torch.LongTensor() - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.indices = torch.CudaLongTensor and self.indices:cudaLong() or self.indices - else - self.indices = self.indices:long() - end - input.THNN.SpatialAdaptiveMaxPooling_updateOutput( - input:cdata(), - self.output:cdata(), - self.indices:cdata(), - self.W, self.H - ) - return self.output -end - -function 
SpatialAdaptiveMaxPooling:updateGradInput(input, gradOutput)
-   input.THNN.SpatialAdaptiveMaxPooling_updateGradInput(
-      input:cdata(),
-      gradOutput:cdata(),
-      self.gradInput:cdata(),
-      self.indices:cdata()
-   )
-   return self.gradInput
-end
-
--- for backward compat
-function SpatialAdaptiveMaxPooling:empty()
-   self:clearState()
-end
-
-function SpatialAdaptiveMaxPooling:clearState()
-   if self.indices then
-      self.indices:set()
-   end
-   return parent.clearState(self)
-end
diff --git a/contrib/lua-torch/nn/SpatialAutoCropMSECriterion.lua b/contrib/lua-torch/nn/SpatialAutoCropMSECriterion.lua
deleted file mode 100644
index 97206a0627..0000000000
--- a/contrib/lua-torch/nn/SpatialAutoCropMSECriterion.lua
+++ /dev/null
@@ -1,74 +0,0 @@
---[[
-   SpatialAutoCropMSECriterion.
-   Implements the MSECriterion when the spatial resolution of the input is less than
-   or equal to the spatial resolution of the target. It achieves this by center-cropping
-   the target to the same spatial resolution as the input; the MSE is then
-   calculated between these cropped tensors
-]]
-local SpatialAutoCropMSECriterion, parent = torch.class('nn.SpatialAutoCropMSECriterion', 'nn.MSECriterion')
-
-function SpatialAutoCropMSECriterion:__init(sizeAverage)
-   parent.__init(self, sizeAverage)
-end
-
-local function centerCrop(input, cropSize)
-   assert(input:dim() == 3 or input:dim() == 4, "input should be a 3D or 4D tensor")
-   assert(#cropSize == 2, "cropSize should have two elements only")
-   local _input = input
-   if input:dim() == 3 then
-      _input = input:view(1, input:size(1), input:size(2), input:size(3))
-   end
-   assert(cropSize[1] > 0 and cropSize[1] <= _input:size(3),
-            "0 < cropSize[1] <= input:size(3) not satisfied")
-   assert(cropSize[2] > 0 and cropSize[2] <= _input:size(4),
-            "0 < cropSize[2] <= input:size(4) not satisfied")
-
-   local inputHeight = _input:size(3)
-   local inputWidth = _input:size(4)
-
-   local rowStart = 1 + math.floor((inputHeight - cropSize[1])/2.0)
-   local rowEnd = rowStart + cropSize[1] - 1
-   local colStart = 1 + math.floor((inputWidth - cropSize[2])/2.0)
-   local colEnd = colStart + cropSize[2] - 1
-   if input:dim() == 3 then
-      return input[{{}, {rowStart, rowEnd}, {colStart, colEnd}}]
-   else
-      return input[{{}, {}, {rowStart, rowEnd}, {colStart, colEnd}}]
-   end
-end
-
-local function getTensorHeightAndWidth(tensor)
-   local heightIdx = 2
-   local widthIdx = 3
-   if tensor:dim() == 4 then
-      heightIdx = 3
-      widthIdx = 4
-   end
-   return tensor:size(heightIdx), tensor:size(widthIdx)
-end
-
-local function inputResolutionIsSmallerThanTargetResolution(input, target)
-   local inputHeight, inputWidth = getTensorHeightAndWidth(input)
-   local targetHeight, targetWidth = getTensorHeightAndWidth(target)
-   return inputHeight <= targetHeight and inputWidth <= targetWidth
-end
-
-function SpatialAutoCropMSECriterion:updateOutput(input, target)
-   assert(input:dim() == target:dim(), "input and target should have the same number of dimensions")
-   assert(input:dim() == 4 or input:dim() == 3, "input and target must have 3 or 4 dimensions")
-   assert(inputResolutionIsSmallerThanTargetResolution(input, target),
-      "Spatial resolution of input should be less than or equal to the spatial resolution of the target")
-
-   local inputHeight, inputWidth = getTensorHeightAndWidth(input)
-   local targetCropped = centerCrop(target, {inputHeight, inputWidth})
-   return parent.updateOutput(self, input, targetCropped)
-end
-
-
-function SpatialAutoCropMSECriterion:updateGradInput(input, gradOutput)
-   assert(input:dim() == gradOutput:dim(), "input 
and gradOutput should have the same number of dimensions") - assert(input:dim() == 4 or input:dim() == 3, "input and gradOutput must have 3 or 4 dimensions") - assert(input:isSameSizeAs(gradOutput), "gradOutput and input must have the same size") - - return parent.updateGradInput(self, input, gradOutput) -end diff --git a/contrib/lua-torch/nn/SpatialAveragePooling.lua b/contrib/lua-torch/nn/SpatialAveragePooling.lua deleted file mode 100644 index 1e76668275..0000000000 --- a/contrib/lua-torch/nn/SpatialAveragePooling.lua +++ /dev/null @@ -1,93 +0,0 @@ -local SpatialAveragePooling, parent = torch.class('nn.SpatialAveragePooling', 'nn.Module') - -function SpatialAveragePooling:__init(kW, kH, dW, dH, padW, padH) - parent.__init(self) - - self.kW = kW - self.kH = kH - self.dW = dW or 1 - self.dH = dH or 1 - self.padW = padW or 0 - self.padH = padH or 0 - self.ceil_mode = false - self.count_include_pad = true - self.divide = true -end - -function SpatialAveragePooling:ceil() - self.ceil_mode = true - return self -end - -function SpatialAveragePooling:floor() - self.ceil_mode = false - return self -end - -function SpatialAveragePooling:setCountIncludePad() - self.count_include_pad = true - return self -end - -function SpatialAveragePooling:setCountExcludePad() - self.count_include_pad = false - return self -end - -local function backwardCompatible(self) - if self.ceil_mode == nil then - self.ceil_mode = false - self.count_include_pad = true - self.padH = 0 - self.padW = 0 - end -end - -function SpatialAveragePooling:updateOutput(input) - backwardCompatible(self) - input.THNN.SpatialAveragePooling_updateOutput( - input:cdata(), - self.output:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.ceil_mode, - self.count_include_pad - ) - -- for backward compatibility with saved models - -- which are not supposed to have "divide" field - if not self.divide then - self.output:mul(self.kW*self.kH) - end - return self.output -end - -function SpatialAveragePooling:updateGradInput(input, gradOutput) - if self.gradInput then - input.THNN.SpatialAveragePooling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.ceil_mode, - self.count_include_pad - ) - -- for backward compatibility - if not self.divide then - self.gradInput:mul(self.kW*self.kH) - end - return self.gradInput - end -end - -function SpatialAveragePooling:__tostring__() - local s = string.format('%s(%dx%d, %d,%d', torch.type(self), - self.kW, self.kH, self.dW, self.dH) - if (self.padW or self.padH) and (self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padW .. ','.. self.padH - end - s = s .. ')' - return s -end diff --git a/contrib/lua-torch/nn/SpatialBatchNormalization.lua b/contrib/lua-torch/nn/SpatialBatchNormalization.lua deleted file mode 100644 index c5004ce3a3..0000000000 --- a/contrib/lua-torch/nn/SpatialBatchNormalization.lua +++ /dev/null @@ -1,35 +0,0 @@ ---[[ - This file implements Batch Normalization as described in the paper: - "Batch Normalization: Accelerating Deep Network Training - by Reducing Internal Covariate Shift" - by Sergey Ioffe, Christian Szegedy - - This implementation is useful for inputs coming from convolution layers. - For non-convolutional layers, see BatchNormalization.lua - - The operation implemented is: - y = ( x - mean(x) ) - -------------------- * gamma + beta - standard-deviation(x) - where gamma and beta are learnable parameters. 
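
A rough standalone illustration of the formula above, written with plain torch7 tensor operations rather than this module's actual THNN code path; sizes and parameter values are made up:

   -- Illustrative sketch only: the per-channel operation the docstring
   -- above describes, on a 4D (batch x channels x H x W) input.
   local eps = 1e-5
   local x = torch.randn(8, 3, 16, 16)
   local gamma, beta = torch.rand(3), torch.rand(3)
   local y = x:clone()
   for c = 1, x:size(2) do
      local xc = y:select(2, c)                  -- all values of channel c
      local mean, std = xc:mean(), xc:std()
      xc:add(-mean):div(math.sqrt(std^2 + eps))  -- normalize
      xc:mul(gamma[c]):add(beta[c])              -- scale and shift
   end
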
- - The learning of gamma and beta is optional. - - Usage: - with learnable parameters: nn.SpatialBatchNormalization(N [,eps] [,momentum]) - where N = dimensionality of input - without learnable parameters: nn.SpatialBatchNormalization(N [,eps] [,momentum], false) - - eps is a small value added to the variance to avoid divide-by-zero. - Defaults to 1e-5 - - In training time, this layer keeps a running estimate of it's computed mean and std. - The running sum is kept with a default momentum of 0.1 (unless over-ridden) - In test time, this running mean/std is used to normalize. -]]-- -local BN, parent = torch.class('nn.SpatialBatchNormalization', 'nn.BatchNormalization') - -BN.__version = 2 - --- expected dimension of input -BN.nDim = 4 diff --git a/contrib/lua-torch/nn/SpatialClassNLLCriterion.lua b/contrib/lua-torch/nn/SpatialClassNLLCriterion.lua deleted file mode 100644 index fbd3674103..0000000000 --- a/contrib/lua-torch/nn/SpatialClassNLLCriterion.lua +++ /dev/null @@ -1,81 +0,0 @@ -local THNN = require 'nn.THNN' -local SpatialClassNLLCriterion, parent = torch.class('nn.SpatialClassNLLCriterion', 'nn.Criterion') - -function SpatialClassNLLCriterion:__init(weights, sizeAverage) - parent.__init(self) - if sizeAverage ~= nil then - self.sizeAverage = sizeAverage - else - self.sizeAverage = true - end - if weights then - assert(weights:dim() == 1, "weights input should be 1-D Tensor") - self.weights = weights - end - - self.output_tensor = torch.zeros(1) - self.total_weight_tensor = torch.ones(1) - self.target = torch.zeros(1):long() -end - -function SpatialClassNLLCriterion:__len() - if (self.weights) then - return #self.weights - else - return 0 - end -end - -function SpatialClassNLLCriterion:updateOutput(input, target) - if type(target) == 'number' then - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.target = torch.CudaLongTensor and self.target:cudaLong() or self.target:cuda() - else - self.target = self.target:long() - end - self.target[1] = target - elseif torch.typename(input):find('torch%.Cuda.*Tensor') then - self.target = torch.CudaLongTensor and target:cudaLong() or target - else - self.target = target:long() - end - - input.THNN.SpatialClassNLLCriterion_updateOutput( - input:cdata(), - self.target:cdata(), - self.output_tensor:cdata(), - self.sizeAverage, - THNN.optionalTensor(self.weights), - self.total_weight_tensor:cdata() - ) - self.output = self.output_tensor[1] - return self.output, self.total_weight_tensor[1] -end - -function SpatialClassNLLCriterion:updateGradInput(input, target) - if type(target) == 'number' then - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.target = torch.CudaLongTensor and self.target:cudaLong() or self.target:cuda() - else - self.target = self.target:long() - end - self.target[1] = target - elseif torch.typename(input):find('torch%.Cuda.*Tensor') then - self.target = torch.CudaLongTensor and target:cudaLong() or target - else - self.target = target:long() - end - - self.gradInput:resizeAs(input):zero() - - input.THNN.SpatialClassNLLCriterion_updateGradInput( - input:cdata(), - self.target:cdata(), - self.gradInput:cdata(), - self.sizeAverage, - THNN.optionalTensor(self.weights), - self.total_weight_tensor:cdata() - ) - - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SpatialContrastiveNormalization.lua b/contrib/lua-torch/nn/SpatialContrastiveNormalization.lua deleted file mode 100644 index 0ad251ae4b..0000000000 --- a/contrib/lua-torch/nn/SpatialContrastiveNormalization.lua +++ /dev/null @@ 
-1,36 +0,0 @@ -local SpatialContrastiveNormalization, parent = torch.class('nn.SpatialContrastiveNormalization','nn.Module') - -function SpatialContrastiveNormalization:__init(nInputPlane, kernel, threshold, thresval) - parent.__init(self) - - -- get args - self.nInputPlane = nInputPlane or 1 - self.kernel = kernel or torch.Tensor(9,9):fill(1) - self.threshold = threshold or 1e-4 - self.thresval = thresval or threshold or 1e-4 - local kdim = self.kernel:nDimension() - - -- check args - if kdim ~= 2 and kdim ~= 1 then - error(' averaging kernel must be 2D or 1D') - end - if (self.kernel:size(1) % 2) == 0 or (kdim == 2 and (self.kernel:size(2) % 2) == 0) then - error(' averaging kernel must have ODD dimensions') - end - - -- instantiate sub+div normalization - self.normalizer = nn.Sequential() - self.normalizer:add(nn.SpatialSubtractiveNormalization(self.nInputPlane, self.kernel)) - self.normalizer:add(nn.SpatialDivisiveNormalization(self.nInputPlane, self.kernel, - self.threshold, self.thresval)) -end - -function SpatialContrastiveNormalization:updateOutput(input) - self.output = self.normalizer:forward(input) - return self.output -end - -function SpatialContrastiveNormalization:updateGradInput(input, gradOutput) - self.gradInput = self.normalizer:backward(input, gradOutput) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SpatialConvolution.lua b/contrib/lua-torch/nn/SpatialConvolution.lua deleted file mode 100644 index 15a2b4b628..0000000000 --- a/contrib/lua-torch/nn/SpatialConvolution.lua +++ /dev/null @@ -1,155 +0,0 @@ -local THNN = require 'nn.THNN' -local SpatialConvolution, parent = torch.class('nn.SpatialConvolution', 'nn.Module') - -function SpatialConvolution:__init(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH) - parent.__init(self) - - dW = dW or 1 - dH = dH or 1 - - self.nInputPlane = nInputPlane - self.nOutputPlane = nOutputPlane - self.kW = kW - self.kH = kH - - self.dW = dW - self.dH = dH - self.padW = padW or 0 - self.padH = padH or self.padW - - self.weight = torch.Tensor(nOutputPlane, nInputPlane, kH, kW) - self.bias = torch.Tensor(nOutputPlane) - self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane, kH, kW) - self.gradBias = torch.Tensor(nOutputPlane) - - self:reset() -end - -function SpatialConvolution:noBias() - self.bias = nil - self.gradBias = nil - return self -end - -function SpatialConvolution:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1/math.sqrt(self.kW*self.kH*self.nInputPlane) - end - if nn.oldSeed then - self.weight:apply(function() - return torch.uniform(-stdv, stdv) - end) - if self.bias then - self.bias:apply(function() - return torch.uniform(-stdv, stdv) - end) - end - else - self.weight:uniform(-stdv, stdv) - if self.bias then - self.bias:uniform(-stdv, stdv) - end - end -end - -local function backCompatibility(self) - self.finput = self.finput or self.weight.new() - self.fgradInput = self.fgradInput or self.weight.new() - if self.padding then - self.padW = self.padding - self.padH = self.padding - self.padding = nil - else - self.padW = self.padW or 0 - self.padH = self.padH or 0 - end - if self.weight:dim() == 2 then - self.weight = self.weight:view(self.nOutputPlane, self.nInputPlane, self.kH, self.kW) - end - if self.gradWeight and self.gradWeight:dim() == 2 then - self.gradWeight = self.gradWeight:view(self.nOutputPlane, self.nInputPlane, self.kH, self.kW) - end -end - -function SpatialConvolution:updateOutput(input) - assert(input.THNN, torch.type(input)..'.THNN backend not imported') - 
backCompatibility(self) - input.THNN.SpatialConvolutionMM_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - THNN.optionalTensor(self.bias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH - ) - return self.output -end - -function SpatialConvolution:updateGradInput(input, gradOutput) - assert(input.THNN, torch.type(input)..'.THNN backend not imported') - if self.gradInput then - backCompatibility(self) - input.THNN.SpatialConvolutionMM_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH - ) - return self.gradInput - end -end - -function SpatialConvolution:accGradParameters(input, gradOutput, scale) - assert(input.THNN, torch.type(input)..'.THNN backend not imported') - scale = scale or 1 - backCompatibility(self) - input.THNN.SpatialConvolutionMM_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - THNN.optionalTensor(self.gradBias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - scale - ) -end - -function SpatialConvolution:type(type,tensorCache) - self.finput = self.finput and torch.Tensor() - self.fgradInput = self.fgradInput and torch.Tensor() - return parent.type(self,type,tensorCache) -end - -function SpatialConvolution:__tostring__() - local s = string.format('%s(%d -> %d, %dx%d', torch.type(self), - self.nInputPlane, self.nOutputPlane, self.kW, self.kH) - if self.dW ~= 1 or self.dH ~= 1 or self.padW ~= 0 or self.padH ~= 0 then - s = s .. string.format(', %d,%d', self.dW, self.dH) - end - if (self.padW or self.padH) and (self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padW .. ',' .. self.padH - end - if self.bias then - return s .. ')' - else - return s .. 
') without bias' - end -end - -function SpatialConvolution:clearState() - nn.utils.clear(self, 'finput', 'fgradInput', '_input', '_gradOutput') - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/SpatialConvolutionLocal.lua b/contrib/lua-torch/nn/SpatialConvolutionLocal.lua deleted file mode 100644 index 9494c2ffe5..0000000000 --- a/contrib/lua-torch/nn/SpatialConvolutionLocal.lua +++ /dev/null @@ -1,188 +0,0 @@ -local SpatialConvolutionLocal, parent = torch.class('nn.SpatialConvolutionLocal', 'nn.Module') - -function SpatialConvolutionLocal:__init(nInputPlane, nOutputPlane, iW, iH ,kW, kH, dW, dH, padW, padH) - parent.__init(self) - - dW = dW or 1 - dH = dH or 1 - - self.nInputPlane = nInputPlane - self.nOutputPlane = nOutputPlane - self.kW = kW - self.kH = kH - self.iW = iW - self.iH = iH - - self.dW = dW - self.dH = dH - self.padW = padW or 0 - self.padH = padH or self.padW - self.oW = math.floor((self.padW * 2 + iW - self.kW) / self.dW) + 1 - self.oH = math.floor((self.padH * 2 + iH - self.kH) / self.dH) + 1 - assert(1 <= self.oW and 1 <= self.oH, 'illegal configuration: output width or height less than 1') - - self.weight = torch.Tensor(self.oH, self.oW, nOutputPlane, nInputPlane, kH, kW) - self.bias = torch.Tensor(nOutputPlane, self.oH, self.oW) - self.gradWeight = torch.Tensor():resizeAs(self.weight) - self.gradBias = torch.Tensor():resizeAs(self.bias) - - self:reset() -end - -function SpatialConvolutionLocal:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1/math.sqrt(self.kW*self.kH*self.nInputPlane) - end - if nn.oldSeed then - self.weight:apply(function() - return torch.uniform(-stdv, stdv) - end) - self.bias:apply(function() - return torch.uniform(-stdv, stdv) - end) - else - self.weight:uniform(-stdv, stdv) - self.bias:uniform(-stdv, stdv) - end -end - -local function viewWeight(self) - self.weight = self.weight:view(self.oH * self.oW, self.nOutputPlane, self.nInputPlane * self.kH * self.kW) - if self.gradWeight and self.gradWeight:dim() > 0 then - self.gradWeight = self.gradWeight:view(self.oH * self.oW, self.nOutputPlane, self.nInputPlane * self.kH * self.kW) - end -end - -local function unviewWeight(self) - self.weight = self.weight:view(self.oH, self.oW, self.nOutputPlane, self.nInputPlane, self.kH, self.kW) - if self.gradWeight and self.gradWeight:dim() > 0 then - self.gradWeight = self.gradWeight:view(self.oH, self.oW, self.nOutputPlane, self.nInputPlane, self.kH, self.kW) - end -end - -local function checkInputSize(self, input) - if input:nDimension() == 3 then - if input:size(1) ~= self.nInputPlane or input:size(2) ~= self.iH or input:size(3) ~= self.iW then - error(string.format('Given input size: (%dx%dx%d) inconsistent with expected input size: (%dx%dx%d).', - input:size(1), input:size(2), input:size(3), self.nInputPlane, self.iH, self.iW)) - end - elseif input:nDimension() == 4 then - if input:size(2) ~= self.nInputPlane or input:size(3) ~= self.iH or input:size(4) ~= self.iW then - error(string.format('Given input size: (%dx%dx%dx%d) inconsistent with expected input size: (batchsize x%dx%dx%d).', - input:size(1), input:size(2), input:size(3), input:size(4), self.nInputPlane, self.iH, self.iW)) - end - else - error('3D or 4D(batch mode) tensor expected') - end -end - -local function checkOutputSize(self, input, output) - if output:nDimension() ~= input:nDimension() then - error('inconsistent dimension between output and input.') - end - if output:nDimension() == 3 then - if output:size(1) ~= self.nOutputPlane or 
output:size(2) ~= self.oH or output:size(3) ~= self.oW then - error(string.format('Given output size: (%dx%dx%d) inconsistent with expected output size: (%dx%dx%d).', - output:size(1), output:size(2), output:size(3), self.nOutputPlane, self.oH, self.oW)) - end - elseif output:nDimension() == 4 then - if output:size(2) ~= self.nOutputPlane or output:size(3) ~= self.oH or output:size(4) ~= self.oW then - error(string.format('Given output size: (%dx%dx%dx%d) inconsistent with expected output size: (batchsize x%dx%dx%d).', - output:size(1), output:size(2), output:size(3), output:size(4), self.nOutputPlane, self.oH, self.oW)) - end - else - error('3D or 4D(batch mode) tensor expected') - end -end - -function SpatialConvolutionLocal:updateOutput(input) - self.finput = self.finput or input.new() - self.fgradInput = self.fgradInput or input.new() - checkInputSize(self, input) - viewWeight(self) - input.THNN.SpatialConvolutionLocal_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - self.bias:cdata(), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.iW, self.iH, - self.oW, self.oH - ) - unviewWeight(self) - return self.output -end - -function SpatialConvolutionLocal:updateGradInput(input, gradOutput) - checkInputSize(self, input) - checkOutputSize(self, input, gradOutput) - if self.gradInput then - viewWeight(self) - input.THNN.SpatialConvolutionLocal_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.iW, self.iH, - self.oW, self.oH - ) - unviewWeight(self) - return self.gradInput - end -end - -function SpatialConvolutionLocal:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - checkInputSize(self, input) - checkOutputSize(self, input, gradOutput) - viewWeight(self) - input.THNN.SpatialConvolutionLocal_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - self.gradBias:cdata(), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.iW, self.iH, - self.oW, self.oH, - scale - ) - unviewWeight(self) -end - -function SpatialConvolutionLocal:type(type,tensorCache) - self.finput = self.finput and torch.Tensor() - self.fgradInput = self.fgradInput and torch.Tensor() - return parent.type(self,type,tensorCache) -end - -function SpatialConvolutionLocal:__tostring__() - local s = string.format('%s(%d -> %d, %dx%d, %dx%d', torch.type(self), - self.nInputPlane, self.nOutputPlane, self.iW, self.iH, self.kW, self.kH) - if self.dW ~= 1 or self.dH ~= 1 or self.padW ~= 0 or self.padH ~= 0 then - s = s .. string.format(', %d,%d', self.dW, self.dH) - end - if (self.padW or self.padH) and (self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padW .. ',' .. self.padH - end - return s .. 
')' -end - -function SpatialConvolutionLocal:clearState() - nn.utils.clear(self, 'finput', 'fgradInput', '_input', '_gradOutput') - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/SpatialConvolutionMM.lua b/contrib/lua-torch/nn/SpatialConvolutionMM.lua deleted file mode 100644 index f20734f9b9..0000000000 --- a/contrib/lua-torch/nn/SpatialConvolutionMM.lua +++ /dev/null @@ -1,139 +0,0 @@ -local THNN = require 'nn.THNN' -local SpatialConvolutionMM, parent = torch.class('nn.SpatialConvolutionMM', 'nn.Module') - -function SpatialConvolutionMM:__init(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH) - parent.__init(self) - - dW = dW or 1 - dH = dH or 1 - - self.nInputPlane = nInputPlane - self.nOutputPlane = nOutputPlane - self.kW = kW - self.kH = kH - - self.dW = dW - self.dH = dH - self.padW = padW or 0 - self.padH = padH or self.padW - - self.weight = torch.Tensor(nOutputPlane, nInputPlane*kH*kW) - self.bias = torch.Tensor(nOutputPlane) - self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane*kH*kW) - self.gradBias = torch.Tensor(nOutputPlane) - - self:reset() -end - -function SpatialConvolutionMM:noBias() - self.bias = nil - self.gradBias = nil - return self -end - -function SpatialConvolutionMM:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1/math.sqrt(self.kW*self.kH*self.nInputPlane) - end - if nn.oldSeed then - self.weight:apply(function() - return torch.uniform(-stdv, stdv) - end) - self.bias:apply(function() - return torch.uniform(-stdv, stdv) - end) - else - self.weight:uniform(-stdv, stdv) - self.bias:uniform(-stdv, stdv) - end -end - -function SpatialConvolutionMM:updateOutput(input) - assert(input.THNN, torch.type(input)..'.THNN backend not imported') - self.finput = self.finput or input.new() - self.fgradInput = self.fgradInput or input.new() - -- backward compatibility - if self.padding then - self.padW = self.padding - self.padH = self.padding - self.padding = nil - end - input.THNN.SpatialConvolutionMM_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - THNN.optionalTensor(self.bias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH - ) - return self.output -end - -function SpatialConvolutionMM:updateGradInput(input, gradOutput) - assert(input.THNN, torch.type(input)..'.THNN backend not imported') - if self.gradInput then - input.THNN.SpatialConvolutionMM_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH - ) - return self.gradInput - end -end - -function SpatialConvolutionMM:accGradParameters(input, gradOutput, scale) - assert(input.THNN, torch.type(input)..'.THNN backend not imported') - scale = scale or 1 - assert((self.bias and self.gradBias) or (self.bias == nil and self.gradBias == nil)) - input.THNN.SpatialConvolutionMM_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - THNN.optionalTensor(self.gradBias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - scale - ) -end - -function SpatialConvolutionMM:type(type,tensorCache) - self.finput = self.finput and torch.Tensor() - self.fgradInput = self.fgradInput and torch.Tensor() - return parent.type(self,type,tensorCache) -end - -function SpatialConvolutionMM:__tostring__() - local s = string.format('%s(%d -> %d, 
%dx%d', torch.type(self), - self.nInputPlane, self.nOutputPlane, self.kW, self.kH) - if self.dW ~= 1 or self.dH ~= 1 or self.padW ~= 0 or self.padH ~= 0 then - s = s .. string.format(', %d,%d', self.dW, self.dH) - end - if (self.padW or self.padH) and (self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padW .. ',' .. self.padH - end - if self.bias then - return s .. ')' - else - return s .. ') without bias' - end -end - -function SpatialConvolutionMM:clearState() - nn.utils.clear(self, 'finput', 'fgradInput', '_input', '_gradOutput') - return parent.clearState(self) -end - diff --git a/contrib/lua-torch/nn/SpatialConvolutionMap.lua b/contrib/lua-torch/nn/SpatialConvolutionMap.lua deleted file mode 100644 index 9051c119eb..0000000000 --- a/contrib/lua-torch/nn/SpatialConvolutionMap.lua +++ /dev/null @@ -1,154 +0,0 @@ -local SpatialConvolutionMap, parent = torch.class('nn.SpatialConvolutionMap', 'nn.Module') - -nn.tables = nn.tables or {} - -function nn.tables.full(nin, nout) - local ft = torch.Tensor(nin*nout,2) - local p = 1 - for j=1,nout do - for i=1,nin do - ft[p][1] = i - ft[p][2] = j - p = p + 1 - end - end - return ft -end - -function nn.tables.oneToOne(nfeat) - local ft = torch.Tensor(nfeat,2) - for i=1,nfeat do - ft[i][1] = i - ft[i][2] = i - end - return ft -end - -function nn.tables.random(nin, nout, nto) - local nker = nto * nout - local tbl = torch.Tensor(nker, 2) - local fi = torch.randperm(nin) - local frcntr = 1 - local nfi = math.floor(nin/nto) -- number of distinct nto chunks - local totbl = tbl:select(2,2) - local frtbl = tbl:select(2,1) - local fitbl = fi:narrow(1, 1, (nfi * nto)) -- part of fi that covers distinct chunks - local ufrtbl= frtbl:unfold(1, nto, nto) - local utotbl= totbl:unfold(1, nto, nto) - local ufitbl= fitbl:unfold(1, nto, nto) - - -- start filling frtbl - for i=1,nout do -- fro each unit in target map - ufrtbl:select(1,i):copy(ufitbl:select(1,frcntr)) - frcntr = frcntr + 1 - if frcntr-1 == nfi then -- reset fi - fi:copy(torch.randperm(nin)) - frcntr = 1 - end - end - for tocntr=1,utotbl:size(1) do - utotbl:select(1,tocntr):fill(tocntr) - end - return tbl -end - -function SpatialConvolutionMap:__init(conMatrix, kW, kH, dW, dH) - parent.__init(self) - - dW = dW or 1 - dH = dH or 1 - - self.kW = kW - self.kH = kH - self.dW = dW - self.dH = dH - self.connTable = conMatrix - self.nInputPlane = self.connTable:select(2,1):max() - self.nOutputPlane = self.connTable:select(2,2):max() - self.weight = torch.Tensor(self.connTable:size(1), kH, kW) - self.bias = torch.Tensor(self.nOutputPlane) - self.gradWeight = torch.Tensor(self.connTable:size(1), kH, kW) - self.gradBias = torch.Tensor(self.nOutputPlane) - - self:reset() -end - -function SpatialConvolutionMap:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - if nn.oldSeed then - self.weight:apply(function() - return torch.uniform(-stdv, stdv) - end) - self.bias:apply(function() - return torch.uniform(-stdv, stdv) - end) - else - self.weight:uniform(-stdv, stdv) - self.bias:uniform(-stdv, stdv) - end - else - local ninp = torch.Tensor(self.nOutputPlane):zero() - for i=1,self.connTable:size(1) do ninp[self.connTable[i][2]] = ninp[self.connTable[i][2]]+1 end - for k=1,self.connTable:size(1) do - stdv = 1/math.sqrt(self.kW*self.kH*ninp[self.connTable[k][2]]) - if nn.oldSeed then - self.weight:select(1,k):apply(function() return torch.uniform(-stdv,stdv) end) - else - self.weight:select(1,k):uniform(-stdv,stdv) - end - end - for k=1,self.bias:size(1) do - stdv = 1/math.sqrt(self.kW*self.kH*ninp[k]) 
- self.bias[k] = torch.uniform(-stdv,stdv) - end - end -end - -function SpatialConvolutionMap:updateOutput(input) - input.THNN.SpatialConvolutionMap_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - self.bias:cdata(), - self.connTable:cdata(), - self.nInputPlane, - self.nOutputPlane, - self.dW, self.dH - ) - return self.output -end - -function SpatialConvolutionMap:updateGradInput(input, gradOutput) - input.THNN.SpatialConvolutionMap_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.bias:cdata(), - self.connTable:cdata(), - self.nInputPlane, - self.nOutputPlane, - self.dW, self.dH - ) - return self.gradInput -end - -function SpatialConvolutionMap:accGradParameters(input, gradOutput, scale) - input.THNN.SpatialConvolutionMap_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - self.gradBias:cdata(), - self.connTable:cdata(), - self.nInputPlane, - self.nOutputPlane, - self.dW, self.dH, - scale or 1 - ) -end - -function SpatialConvolutionMap:decayParameters(decay) - self.weight:add(-decay, self.weight) - self.bias:add(-decay, self.bias) -end diff --git a/contrib/lua-torch/nn/SpatialCrossMapLRN.lua b/contrib/lua-torch/nn/SpatialCrossMapLRN.lua deleted file mode 100644 index 088eb07f0f..0000000000 --- a/contrib/lua-torch/nn/SpatialCrossMapLRN.lua +++ /dev/null @@ -1,153 +0,0 @@ -local SpatialCrossMapLRN, parent = torch.class('nn.SpatialCrossMapLRN', 'nn.Module') - -function SpatialCrossMapLRN:__init(size, alpha, beta, k) - parent.__init(self) - - self.size = size - self.alpha = alpha or 0.0001 - self.beta = beta or 0.75 - self.k = k or 1 -end - -function SpatialCrossMapLRN:updateOutput(input) - assert(input:dim() == 3 or input:dim() == 4, - 'Input must be 3D or 4D') - - self.scale = self.scale or input.new() - - if torch.typename(input):find('torch%.Cuda.*Tensor') then - input.THNN.SpatialCrossMapLRN_updateOutput( - input:cdata(), - self.output:cdata(), - self.scale:cdata(), - self.size, - self.alpha, - self.beta, - self.k - ) - else - local isBatch = true - if input:dim() == 3 then - input = nn.utils.addSingletonDimension(input) - isBatch = false - end - - local batchSize = input:size(1) - local channels = input:size(2) - local inputHeight = input:size(3) - local inputWidth = input:size(4) - - self.output:resizeAs(input) - self.scale:resizeAs(input) - - -- use output storage as temporary buffer - local inputSquare = self.output - inputSquare:pow(input, 2) - - local prePad = (self.size - 1)/2 + 1 - local prePadCrop = prePad > channels and channels or prePad - - local scaleFirst = self.scale:select(2,1) - scaleFirst:zero() - -- compute first feature map normalization - for c = 1, prePadCrop do - scaleFirst:add(inputSquare:select(2, c)) - end - - -- reuse computations for next feature maps normalization - -- by adding the next feature map and removing the previous - for c = 2, channels do - local scalePrevious = self.scale:select(2, c -1) - local scaleCurrent = self.scale:select(2, c) - scaleCurrent:copy(scalePrevious) - if c < channels - prePad + 2 then - local squareNext = inputSquare:select(2, c + prePad - 1) - scaleCurrent:add(1, squareNext) - end - if c > prePad then - local squarePrevious = inputSquare:select(2, c - prePad ) - scaleCurrent:add(-1, squarePrevious) - end - end - - self.scale:mul(self.alpha/self.size):add(self.k) - - self.output:pow(self.scale,-self.beta) - self.output:cmul(input) - - if not isBatch then - self.output = self.output[1] - end - end - - 
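
The channel loop above is a sliding-window sum: each scale plane reuses the previous one, adding the entering squared feature map and subtracting the leaving one instead of re-summing the whole window at every position. A 1D standalone sketch of the same bookkeeping, with made-up values and assuming torch7's tensor API:

   -- Standalone sketch of the incremental window sum used above.
   local squares = torch.Tensor{1, 4, 9, 16, 25}   -- per-channel squared sums
   local size = 3                                  -- LRN window size
   local prePad = (size - 1) / 2 + 1
   local scale = torch.Tensor(squares:size(1))
   scale[1] = squares:narrow(1, 1, math.min(prePad, squares:size(1))):sum()
   for c = 2, squares:size(1) do
      scale[c] = scale[c - 1]
      local entering = c + prePad - 1              -- element joining the window
      if entering <= squares:size(1) then scale[c] = scale[c] + squares[entering] end
      local leaving = c - prePad                   -- element leaving the window
      if leaving >= 1 then scale[c] = scale[c] - squares[leaving] end
   end
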
return self.output -end - -function SpatialCrossMapLRN:updateGradInput(input, gradOutput) - assert(input:dim() == 3 or input:dim() == 4, - 'Input must be 3D or 4D') - - if torch.typename(input):find('torch%.Cuda.*Tensor') then - input.THNN.SpatialCrossMapLRN_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.scale:cdata(), - self.output:cdata(), - self.size, - self.alpha, - self.beta, - self.k - ) - else - local isBatch = true - if input:dim() == 3 then - input = nn.utils.addSingletonDimension(input) - gradOutput = nn.utils.addSingletonDimension(gradOutput) - self.output = nn.utils.addSingletonDimension(self.output) - isBatch = false - end - - local batchSize = input:size(1) - local channels = input:size(2) - local inputHeight = input:size(3) - local inputWidth = input:size(4) - - self.paddedRatio = self.paddedRatio or input.new() - self.accumRatio = self.accumRatio or input.new() - self.paddedRatio:resize(channels + self.size - 1, inputHeight, inputWidth) - self.accumRatio:resize(inputHeight,inputWidth) - - local cacheRatioValue = 2*self.alpha*self.beta/self.size - local inversePrePad = self.size - (self.size - 1) / 2 - - self.gradInput:resizeAs(input) - self.gradInput:pow(self.scale,-self.beta):cmul(gradOutput) - - self.paddedRatio:zero() - local paddedRatioCenter = self.paddedRatio:narrow(1, inversePrePad, channels) - for n = 1, batchSize do - paddedRatioCenter:cmul(gradOutput[n],self.output[n]) - paddedRatioCenter:cdiv(self.scale[n]) - self.accumRatio:sum(self.paddedRatio:narrow(1,1,self.size-1), 1) - for c = 1, channels do - self.accumRatio:add(self.paddedRatio[c+self.size-1]) - self.gradInput[n][c]:addcmul(-cacheRatioValue, input[n][c], self.accumRatio) - self.accumRatio:add(-1, self.paddedRatio[c]) - end - end - - if not isBatch then - self.gradInput = self.gradInput[1] - self.output = self.output[1] - end - end - - return self.gradInput -end - - -function SpatialCrossMapLRN:clearState() - nn.utils.clear(self, 'scale', 'paddedRatio', 'accumRatio') - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/SpatialDepthWiseConvolution.lua b/contrib/lua-torch/nn/SpatialDepthWiseConvolution.lua deleted file mode 100644 index 1132f04cb2..0000000000 --- a/contrib/lua-torch/nn/SpatialDepthWiseConvolution.lua +++ /dev/null @@ -1,139 +0,0 @@ -local THNN = require 'nn.THNN' -local SpatialDepthWiseConvolution, parent = torch.class('nn.SpatialDepthWiseConvolution', 'nn.Module') - -function SpatialDepthWiseConvolution:__init(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH) - parent.__init(self) - - dW = dW or 1 - dH = dH or 1 - - self.nInputPlane = nInputPlane - self.nOutputPlane = nOutputPlane - self.kW = kW - self.kH = kH - - self.dW = dW - self.dH = dH - self.padW = padW or 0 - self.padH = padH or self.padW - - self.weight = torch.Tensor(nOutputPlane, nInputPlane*kH*kW) - self.bias = torch.Tensor(nOutputPlane, nInputPlane) - self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane*kH*kW) - self.gradBias = torch.Tensor(nOutputPlane, nInputPlane) - - self:reset() -end - -function SpatialDepthWiseConvolution:noBias() - self.bias = nil - self.gradBias = nil - return self -end - -function SpatialDepthWiseConvolution:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1/math.sqrt(self.kW*self.kH*self.nInputPlane) - end - if nn.oldSeed then - self.weight:apply(function() - return torch.uniform(-stdv, stdv) - end) - self.bias:apply(function() - return torch.uniform(-stdv, stdv) - end) - else - self.weight:uniform(-stdv, 
stdv) - self.bias:uniform(-stdv, stdv) - end -end - -function SpatialDepthWiseConvolution:updateOutput(input) - assert(input.THNN, torch.type(input)..'.THNN backend not imported') - self.finput = self.finput or input.new() - self.fgradInput = self.fgradInput or input.new() - -- backward compatibility - if self.padding then - self.padW = self.padding - self.padH = self.padding - self.padding = nil - end - input.THNN.SpatialDepthWiseConvolution_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - THNN.optionalTensor(self.bias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH - ) - return self.output -end - -function SpatialDepthWiseConvolution:updateGradInput(input, gradOutput) - assert(input.THNN, torch.type(input)..'.THNN backend not imported') - if self.gradInput then - input.THNN.SpatialDepthWiseConvolution_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH - ) - return self.gradInput - end -end - -function SpatialDepthWiseConvolution:accGradParameters(input, gradOutput, scale) - assert(input.THNN, torch.type(input)..'.THNN backend not imported') - scale = scale or 1 - assert((self.bias and self.gradBias) or (self.bias == nil and self.gradBias == nil)) - input.THNN.SpatialDepthWiseConvolution_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - THNN.optionalTensor(self.gradBias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - scale - ) -end - -function SpatialDepthWiseConvolution:type(type,tensorCache) - self.finput = self.finput and torch.Tensor() - self.fgradInput = self.fgradInput and torch.Tensor() - return parent.type(self,type,tensorCache) -end - -function SpatialDepthWiseConvolution:__tostring__() - local s = string.format('%s(%d -> %d, %dx%d', torch.type(self), - self.nInputPlane, self.nOutputPlane, self.kW, self.kH) - if self.dW ~= 1 or self.dH ~= 1 or self.padW ~= 0 or self.padH ~= 0 then - s = s .. string.format(', %d,%d', self.dW, self.dH) - end - if (self.padW or self.padH) and (self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padW .. ',' .. self.padH - end - if self.bias then - return s .. ')' - else - return s .. 
') without bias' - end -end - -function SpatialDepthWiseConvolution:clearState() - nn.utils.clear(self, 'finput', 'fgradInput', '_input', '_gradOutput') - return parent.clearState(self) -end - diff --git a/contrib/lua-torch/nn/SpatialDilatedConvolution.lua b/contrib/lua-torch/nn/SpatialDilatedConvolution.lua deleted file mode 100644 index a0590c7e95..0000000000 --- a/contrib/lua-torch/nn/SpatialDilatedConvolution.lua +++ /dev/null @@ -1,80 +0,0 @@ -local THNN = require 'nn.THNN' -local SpatialDilatedConvolution, parent = torch.class('nn.SpatialDilatedConvolution', 'nn.SpatialConvolution') - -function SpatialDilatedConvolution:__init(nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH, dilationW, dilationH) - parent.__init(self, nInputPlane, nOutputPlane, kW, kH, dW, dH, padW, padH) - - self.dilationW = dilationW or 1 - self.dilationH = dilationH or 1 -end - -function SpatialDilatedConvolution:updateOutput(input) - self.finput = self.finput or self.weight.new() - self.fgradInput = self.fgradInput or self.weight.new() - input.THNN.SpatialDilatedConvolution_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - THNN.optionalTensor(self.bias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.dilationW, self.dilationH - ) - return self.output -end - -function SpatialDilatedConvolution:updateGradInput(input, gradOutput) - if self.gradInput then - self.fgradInput = self.fgradInput or self.weight.new() - input.THNN.SpatialDilatedConvolution_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.finput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.dilationW, self.dilationH - ) - return self.gradInput - end -end - -function SpatialDilatedConvolution:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - self.fgradInput = self.fgradInput or self.weight.new() - input.THNN.SpatialDilatedConvolution_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - THNN.optionalTensor(self.gradBias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.dilationW, self.dilationH, - scale - ) -end - -function SpatialDilatedConvolution:__tostring__() - local s = string.format('%s(%d -> %d, %dx%d', torch.type(self), - self.nInputPlane, self.nOutputPlane, self.kW, self.kH) - if self.dW ~= 1 or self.dH ~= 1 or self.padW ~= 0 or self.padH ~= 0 then - s = s .. string.format(', %d,%d', self.dW, self.dH) - end - if (self.padW or self.padH) and (self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padW .. ',' .. self.padH - end - s = s .. ', ' .. self.dilationW .. ',' .. self.dilationH - if self.bias then - return s .. ')' - else - return s .. 
') without bias' - end -end diff --git a/contrib/lua-torch/nn/SpatialDilatedMaxPooling.lua b/contrib/lua-torch/nn/SpatialDilatedMaxPooling.lua deleted file mode 100644 index 34525a4ad0..0000000000 --- a/contrib/lua-torch/nn/SpatialDilatedMaxPooling.lua +++ /dev/null @@ -1,67 +0,0 @@ -local THNN = require 'nn.THNN' -local SpatialDilatedMaxPooling, parent = torch.class('nn.SpatialDilatedMaxPooling', 'nn.SpatialMaxPooling') - -function SpatialDilatedMaxPooling:__init(kW, kH, dW, dH, padW, padH, dilationW, dilationH) - parent.__init(self, kW, kH, dW, dH, padW, padH) - - self.dilationW = dilationW or 1 - self.dilationH = dilationH or 1 -end - -function SpatialDilatedMaxPooling:updateOutput(input) - self.indices = self.indices or torch.LongTensor() - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.indices = torch.CudaLongTensor and self.indices:cudaLong() or self.indices - else - self.indices = self.indices:long() - end - - local dims = input:dim() - self.iheight = input:size(dims-1) - self.iwidth = input:size(dims) - - input.THNN.SpatialDilatedMaxPooling_updateOutput( - input:cdata(), - self.output:cdata(), - self.indices:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.dilationW, self.dilationH, - self.ceil_mode - ) - return self.output -end - -function SpatialDilatedMaxPooling:updateGradInput(input, gradOutput) - input.THNN.SpatialDilatedMaxPooling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.indices:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.dilationW, self.dilationH, - self.ceil_mode - ) - return self.gradInput -end - -function SpatialDilatedMaxPooling:__tostring__() - local s = string.format('%s(%dx%d, %d,%d', torch.type(self), - self.kW, self.kH, self.dW, self.dH) - if (self.padW or self.padH) and (self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padW .. ','.. self.padH - end - s = s .. ', ' .. self.dilationW .. ',' .. self.dilationH - s = s .. 
')' - return s -end - -function SpatialDilatedMaxPooling:clearState() - if self.indices then - self.indices:set() - end - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/SpatialDivisiveNormalization.lua b/contrib/lua-torch/nn/SpatialDivisiveNormalization.lua deleted file mode 100644 index dc2b8c5301..0000000000 --- a/contrib/lua-torch/nn/SpatialDivisiveNormalization.lua +++ /dev/null @@ -1,136 +0,0 @@ -local SpatialDivisiveNormalization, parent = torch.class('nn.SpatialDivisiveNormalization','nn.Module') - -function SpatialDivisiveNormalization:__init(nInputPlane, kernel, threshold, thresval) - parent.__init(self) - - -- get args - self.nInputPlane = nInputPlane or 1 - self.kernel = kernel or torch.Tensor(9,9):fill(1) - self.threshold = threshold or 1e-4 - self.thresval = thresval or threshold or 1e-4 - local kdim = self.kernel:nDimension() - - -- check args - if kdim ~= 2 and kdim ~= 1 then - error(' averaging kernel must be 2D or 1D') - end - if (self.kernel:size(1) % 2) == 0 or (kdim == 2 and (self.kernel:size(2) % 2) == 0) then - error(' averaging kernel must have ODD dimensions') - end - - -- padding values - local padH = math.floor(self.kernel:size(1)/2) - local padW = padH - if kdim == 2 then - padW = math.floor(self.kernel:size(2)/2) - end - - -- create convolutional mean estimator - self.meanestimator = nn.Sequential() - self.meanestimator:add(nn.SpatialZeroPadding(padW, padW, padH, padH)) - if kdim == 2 then - self.meanestimator:add(nn.SpatialConvolution(self.nInputPlane, 1, self.kernel:size(2), self.kernel:size(1))) - else - self.meanestimator:add(nn.SpatialConvolutionMap(nn.tables.oneToOne(self.nInputPlane), self.kernel:size(1), 1)) - self.meanestimator:add(nn.SpatialConvolution(self.nInputPlane, 1, 1, self.kernel:size(1))) - end - self.meanestimator:add(nn.Replicate(self.nInputPlane,1,3)) - - -- create convolutional std estimator - self.stdestimator = nn.Sequential() - self.stdestimator:add(nn.Square()) - self.stdestimator:add(nn.SpatialZeroPadding(padW, padW, padH, padH)) - if kdim == 2 then - self.stdestimator:add(nn.SpatialConvolution(self.nInputPlane, 1, self.kernel:size(2), self.kernel:size(1))) - else - self.stdestimator:add(nn.SpatialConvolutionMap(nn.tables.oneToOne(self.nInputPlane), self.kernel:size(1), 1)) - self.stdestimator:add(nn.SpatialConvolution(self.nInputPlane, 1, 1, self.kernel:size(1))) - end - self.stdestimator:add(nn.Replicate(self.nInputPlane,1,3)) - self.stdestimator:add(nn.Sqrt()) - - -- set kernel and bias - if kdim == 2 then - self.kernel:div(self.kernel:sum() * self.nInputPlane) - for i = 1,self.nInputPlane do - self.meanestimator.modules[2].weight[1][i] = self.kernel - self.stdestimator.modules[3].weight[1][i] = self.kernel - end - self.meanestimator.modules[2].bias:zero() - self.stdestimator.modules[3].bias:zero() - else - self.kernel:div(self.kernel:sum() * math.sqrt(self.nInputPlane)) - for i = 1,self.nInputPlane do - self.meanestimator.modules[2].weight[i]:copy(self.kernel) - self.meanestimator.modules[3].weight[1][i]:copy(self.kernel) - self.stdestimator.modules[3].weight[i]:copy(self.kernel) - self.stdestimator.modules[4].weight[1][i]:copy(self.kernel) - end - self.meanestimator.modules[2].bias:zero() - self.meanestimator.modules[3].bias:zero() - self.stdestimator.modules[3].bias:zero() - self.stdestimator.modules[4].bias:zero() - end - - -- other operation - self.normalizer = nn.CDivTable() - self.divider = nn.CDivTable() - self.thresholder = nn.Threshold(self.threshold, self.thresval) - - -- coefficient array, to 
adjust side effects - self.coef = torch.Tensor(1,1,1) -end - -function SpatialDivisiveNormalization:updateOutput(input) - - self.localstds = self.stdestimator:updateOutput(input) - - -- compute side coefficients - local dim = input:dim() - if self.localstds:dim() ~= self.coef:dim() or (input:size(dim) ~= self.coef:size(dim)) or (input:size(dim-1) ~= self.coef:size(dim-1)) then - self.ones = self.ones or input.new() - if dim == 4 then - -- batch mode - self.ones:resizeAs(input[1]):fill(1) - local coef = self.meanestimator:updateOutput(self.ones) - self._coef = self._coef or input.new() - self._coef:resizeAs(coef):copy(coef) -- make contiguous for view - self.coef = self._coef:view(1,table.unpack(self._coef:size():totable())):expandAs(self.localstds) - else - self.ones:resizeAs(input):fill(1) - self.coef = self.meanestimator:updateOutput(self.ones) - end - - end - - -- normalize std dev - self.adjustedstds = self.divider:updateOutput{self.localstds, self.coef} - self.thresholdedstds = self.thresholder:updateOutput(self.adjustedstds) - self.output = self.normalizer:updateOutput{input, self.thresholdedstds} - - -- done - return self.output -end - -function SpatialDivisiveNormalization:updateGradInput(input, gradOutput) - -- resize grad - self.gradInput:resizeAs(input):zero() - - -- backprop through all modules - local gradnorm = self.normalizer:updateGradInput({input, self.thresholdedstds}, gradOutput) - local gradadj = self.thresholder:updateGradInput(self.adjustedstds, gradnorm[2]) - local graddiv = self.divider:updateGradInput({self.localstds, self.coef}, gradadj) - self.gradInput:add(self.stdestimator:updateGradInput(input, graddiv[1])) - self.gradInput:add(gradnorm[1]) - - -- done - return self.gradInput -end - -function SpatialDivisiveNormalization:clearState() - if self.ones then self.ones:set() end - if self._coef then self._coef:set() end - self.meanestimator:clearState() - self.stdestimator:clearState() - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/SpatialDropout.lua b/contrib/lua-torch/nn/SpatialDropout.lua deleted file mode 100644 index 4320061b7c..0000000000 --- a/contrib/lua-torch/nn/SpatialDropout.lua +++ /dev/null @@ -1,55 +0,0 @@ -local SpatialDropout, Parent = torch.class('nn.SpatialDropout', 'nn.Module') - -function SpatialDropout:__init(p,stochasticInference) - Parent.__init(self) - self.p = p or 0.5 - self.train = true - self.stochastic_inference = stochasticInference or false - self.noise = torch.Tensor() -end - -function SpatialDropout:updateOutput(input) - self.output:resizeAs(input):copy(input) - if self.train or self.stochastic_inference then - if input:dim() == 4 then - self.noise:resize(input:size(1), input:size(2), 1, 1) - elseif input:dim() == 3 then - self.noise:resize(input:size(1), 1, 1) - else - error('Input must be 4D (nbatch, nfeat, h, w) or 3D (nfeat, h, w)') - end - self.noise:bernoulli(1-self.p) - -- We expand the random dropouts to the entire feature map because the - -- features are likely correlated across the map and so the dropout - -- should also be correlated. 
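
A standalone sketch of that broadcast, with hypothetical sizes and assuming torch7's tensor API: one Bernoulli draw per feature map, expanded across the whole H x W plane, so each map is either kept whole or zeroed entirely:

   -- Standalone sketch: per-feature-map dropout mask broadcast over H x W.
   local p = 0.5
   local input = torch.randn(4, 8, 8)              -- nfeat x h x w
   local noise = torch.Tensor(4, 1, 1):bernoulli(1 - p)
   local masked = input:clone():cmul(torch.expandAs(noise, input))
   -- surviving feature maps are kept intact; dropped maps are zeroed entirely
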
- self.output:cmul(torch.expandAs(self.noise, input)) - else - self.output:mul(1-self.p) - end - return self.output -end - -function SpatialDropout:updateGradInput(input, gradOutput) - if self.train then - self.gradInput:resizeAs(gradOutput):copy(gradOutput) - self.gradInput:cmul(torch.expandAs(self.noise, input)) -- simply mask the gradients with the noise vector - else - error('backprop only defined while training') - end - return self.gradInput -end - -function SpatialDropout:setp(p) - self.p = p -end - -function SpatialDropout:__tostring__() - return string.format('%s(%f)', torch.type(self), self.p) -end - -function SpatialDropout:clearState() - if self.noise then - self.noise:set() - end - return Parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/SpatialFractionalMaxPooling.lua b/contrib/lua-torch/nn/SpatialFractionalMaxPooling.lua deleted file mode 100644 index 884751d414..0000000000 --- a/contrib/lua-torch/nn/SpatialFractionalMaxPooling.lua +++ /dev/null @@ -1,165 +0,0 @@ -local SpatialFractionalMaxPooling, parent = - torch.class('nn.SpatialFractionalMaxPooling', 'nn.Module') - --- Usage: --- nn.SpatialFractionalMaxPooling(poolSizeW, poolSizeH, outW, outH) --- the output should be the exact size (outH x outW) --- nn.SpatialFractionalMaxPooling(poolSizeW, poolSizeH, ratioW, ratioH) --- the output should be the size (floor(inH x ratioH) x floor(inW x ratioW)) --- ratios are numbers between (0, 1) exclusive -function SpatialFractionalMaxPooling:__init(poolSizeW, poolSizeH, arg1, arg2) - parent.__init(self) - assert(poolSizeW >= 2) - assert(poolSizeH >= 2) - - -- Pool size (how wide the pooling for each output unit is) - self.poolSizeW = poolSizeW - self.poolSizeH = poolSizeH - - -- Random samples are drawn for all - -- batch * plane * (height, width; i.e., 2) points. This determines - -- the 2d "pseudorandom" overlapping pooling regions for each - -- (batch element x input plane). A new set of random samples is - -- drawn every updateOutput call, unless we disable it via - -- :fixPoolingRegions(). - self.randomSamples = nil - - -- Flag to disable re-generation of random samples for producing - -- a new pooling. 
For testing purposes - self.newRandomPool = false - - if arg1 >= 1 and arg2 >= 1 then - -- Desired output size: the input tensor will determine the reduction - -- ratio - self.outW = arg1 - self.outH = arg2 - else - -- Reduction ratio specified per each input - -- This is the reduction ratio that we use - self.ratioW = arg1 - self.ratioH = arg2 - - -- The reduction ratio must be between 0 and 1 - assert(self.ratioW > 0 and self.ratioW < 1) - assert(self.ratioH > 0 and self.ratioH < 1) - end -end - -function SpatialFractionalMaxPooling:getBufferSize_(input) - local batchSize = 0 - local planeSize = 0 - - if input:nDimension() == 3 then - batchSize = 1 - planeSize = input:size(1) - elseif input:nDimension() == 4 then - batchSize = input:size(1) - planeSize = input:size(2) - else - error('input must be dim 3 or 4') - end - - return torch.LongStorage({batchSize, planeSize, 2}) -end - -function SpatialFractionalMaxPooling:initSampleBuffer_(input) - local sampleBufferSize = self:getBufferSize_(input) - - if self.randomSamples == nil then - self.randomSamples = input.new():resize(sampleBufferSize):uniform() - elseif (self.randomSamples:size(1) ~= sampleBufferSize[1] or - self.randomSamples:size(2) ~= sampleBufferSize[2]) then - self.randomSamples:resize(sampleBufferSize):uniform() - else - if not self.newRandomPool then - -- Create new pooling windows, since this is a subsequent call - self.randomSamples:uniform() - end - end -end - -function SpatialFractionalMaxPooling:getOutputSizes_(input) - local outW = self.outW - local outH = self.outH - if self.ratioW ~= nil and self.ratioH ~= nil then - if input:nDimension() == 4 then - outW = math.floor(input:size(4) * self.ratioW) - outH = math.floor(input:size(3) * self.ratioH) - elseif input:nDimension() == 3 then - outW = math.floor(input:size(3) * self.ratioW) - outH = math.floor(input:size(2) * self.ratioH) - else - error('input must be dim 3 or 4') - end - - -- Neither can be smaller than 1 - assert(outW > 0, 'reduction ratio or input width too small') - assert(outH > 0, 'reduction ratio or input height too small') - else - assert(outW ~= nil and outH ~= nil) - end - - return outW, outH -end - --- Call this to turn off regeneration of random pooling regions each --- updateOutput call. 
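
The constructor above accepts either an exact output size (both trailing arguments >= 1) or a reduction ratio in (0, 1) exclusive. A brief usage sketch, with made-up sizes and assuming the nn package is loaded:

   -- Usage sketch for the two argument forms parsed in __init above.
   local fixed  = nn.SpatialFractionalMaxPooling(2, 2, 16, 16)    -- exact 16x16 output
   local scaled = nn.SpatialFractionalMaxPooling(2, 2, 0.5, 0.5)  -- floor(in * 0.5) output
   local out = scaled:forward(torch.randn(1, 3, 32, 32))          -- -> 1x3x16x16
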
-function SpatialFractionalMaxPooling:fixPoolingRegions(val) - if val == nil then - val = true - end - - self.newRandomPool = val - return self -end - -function SpatialFractionalMaxPooling:updateOutput(input) - self.indices = self.indices or torch.LongTensor() - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.indices = torch.CudaLongTensor and self.indices:cudaLong() or self.indices - else - self.indices = self.indices:long() - end - self:initSampleBuffer_(input) - local outW, outH = self:getOutputSizes_(input) - - input.THNN.SpatialFractionalMaxPooling_updateOutput( - input:cdata(), - self.output:cdata(), - outW, outH, self.poolSizeW, self.poolSizeH, - self.indices:cdata(), self.randomSamples:cdata()) - return self.output -end - -function SpatialFractionalMaxPooling:updateGradInput(input, gradOutput) - assert(self.randomSamples ~= nil, - 'must call updateOutput/forward first') - - local outW, outH = self:getOutputSizes_(input) - - input.THNN.SpatialFractionalMaxPooling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - outW, outH, self.poolSizeW, self.poolSizeH, - self.indices:cdata()) - return self.gradInput -end - --- backward compat -function SpatialFractionalMaxPooling:empty() - self:clearState() -end - -function SpatialFractionalMaxPooling:clearState() - self.indices = nil - self.randomSamples = nil - return parent.clearState(self) -end - -function SpatialFractionalMaxPooling:__tostring__() - return string.format('%s(%dx%d, %d,%d)', torch.type(self), - self.outW and self.outW or self.ratioW, - self.outH and self.outH or self.ratioH, - self.poolSizeW, self.poolSizeH) -end diff --git a/contrib/lua-torch/nn/SpatialFullConvolution.lua b/contrib/lua-torch/nn/SpatialFullConvolution.lua deleted file mode 100644 index e6019bc18b..0000000000 --- a/contrib/lua-torch/nn/SpatialFullConvolution.lua +++ /dev/null @@ -1,219 +0,0 @@ -local THNN = require 'nn.THNN' -local SpatialFullConvolution, parent = torch.class('nn.SpatialFullConvolution','nn.Module') - -function SpatialFullConvolution:__init(nInputPlane, nOutputPlane, - kW, kH, dW, dH, padW, padH, adjW, adjH) - parent.__init(self) - - dW = dW or 1 - dH = dH or 1 - - self.nInputPlane = nInputPlane - self.nOutputPlane = nOutputPlane - self.kW = kW - self.kH = kH - self.dW = dW - self.dH = dH - self.padW = padW or 0 - self.padH = padH or 0 - self.adjW = adjW or 0 - self.adjH = adjH or 0 - - if self.adjW > self.dW - 1 or self.adjH > self.dH - 1 then - error('adjW and adjH must be smaller than self.dW - 1' .. 
- ' and self.dH - 1 respectively') - end - - self.weight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW) - self.gradWeight = torch.Tensor(nInputPlane, nOutputPlane, kH, kW) - self.bias = torch.Tensor(self.nOutputPlane) - self.gradBias = torch.Tensor(self.nOutputPlane) - - self.ones = torch.Tensor() - - self:reset() -end - -function SpatialFullConvolution:noBias() - self.bias = nil - self.gradBias = nil - return self -end - -function SpatialFullConvolution:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - local nInputPlane = self.nInputPlane - local kH = self.kH - local kW = self.kW - stdv = 1/math.sqrt(kW*kH*nInputPlane) - end - self.weight:uniform(-stdv, stdv) - if self.bias then - self.bias:uniform(-stdv, stdv) - end -end - -local function calculateAdj(targetSize, ker, pad, stride) - return (targetSize + 2 * pad - ker) % stride -end - -function SpatialFullConvolution:backCompatibility() - self.adjW = self.adjW or 0 - self.adjH = self.adjH or 0 -end - -function SpatialFullConvolution:updateOutput(input) - self:backCompatibility() - - local inputTensor = input - local adjW, adjH = self.adjW, self.adjH - - -- The input can be a table where the second element indicates the target - -- output size, in which case the adj factors are computed automatically - if type(inputTensor) == 'table' then - inputTensor = input[1] - local targetTensor = input[2] - local tDims = targetTensor:dim() - local tH = targetTensor:size(tDims-1) - local tW = targetTensor:size(tDims) - adjW = calculateAdj(tW, self.kW, self.padW, self.dW) - adjH = calculateAdj(tH, self.kH, self.padH, self.dH) - self.finput = self.finput or input[1].new() - self.fgradInput = self.fgradInput or input[1].new() - else - self.finput = self.finput or input.new() - self.fgradInput = self.fgradInput or input.new() - end - - inputTensor.THNN.SpatialFullConvolution_updateOutput( - inputTensor:cdata(), - self.output:cdata(), - self.weight:cdata(), - THNN.optionalTensor(self.bias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - adjW, adjH - ) - - return self.output -end - -function SpatialFullConvolution:updateGradInput(input, gradOutput) - self:backCompatibility() - - if self.gradInput then - - local inputTensor = input - local adjW, adjH = self.adjW, self.adjH - - -- The input can be a table where the second element indicates the target - -- output size, in which case the adj factors are computed automatically - if type(inputTensor) == 'table' then - inputTensor = input[1] - local targetTensor = input[2] - local tDims = targetTensor:dim() - local tH = targetTensor:size(tDims-1) - local tW = targetTensor:size(tDims) - adjW = calculateAdj(tW, self.kW, self.padW, self.dW) - adjH = calculateAdj(tH, self.kH, self.padH, self.dH) - -- Momentarily extract the gradInput tensor - if type(self.gradInput) == 'table' then - self.gradInput = self.gradInput[1] or inputTensor.new() - end - end - - inputTensor.THNN.SpatialFullConvolution_updateGradInput( - inputTensor:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.finput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - adjW, adjH - ) - - if type(input) == 'table' then - -- Create a zero tensor to be expanded and used as gradInput[2]. 
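-- Editor's illustrative sketch (not part of the original patch); assumes a
-- working torch/nn install. Output geometry of the full (transposed)
-- convolution above follows oW = (iW - 1)*dW - 2*padW + kW + adjW, and the
-- {input, target} form computes adjW/adjH via calculateAdj automatically.
local nn = require 'nn'

local m = nn.SpatialFullConvolution(3, 8, 4, 4, 2, 2, 1, 1)  -- kW=kH=4, dW=dH=2, pad=1
local x = torch.rand(3, 16, 16)
print(m:forward(x):size())              -- 8x32x32: (16 - 1)*2 - 2*1 + 4 + 0 = 32

local target = torch.rand(8, 33, 33)    -- odd target size, unreachable with adj = 0
print(m:forward({x, target}):size())    -- 8x33x33: adj = (33 + 2*1 - 4) % 2 = 1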
- self.zeroScalar = self.zeroScalar or input[2].new(1):zero() - self.ones:resize(input[2]:dim()):fill(1) - local zeroTensor = self.zeroScalar - :view(table.unpack(self.ones:totable())) - :expandAs(input[2]) - self.gradInput = {self.gradInput, zeroTensor} - end - - return self.gradInput - end -end - -function SpatialFullConvolution:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - self:backCompatibility() - - local inputTensor = input - local adjW, adjH = self.adjW, self.adjH - - -- The input can be a table where the second element indicates the target - -- output size, in which case the adj factors are computed automatically - if type(inputTensor) == 'table' then - inputTensor = input[1] - local targetTensor = input[2] - local tDims = targetTensor:dim() - local tH = targetTensor:size(tDims-1) - local tW = targetTensor:size(tDims) - adjW = calculateAdj(tW, self.kW, self.padW, self.dW) - adjH = calculateAdj(tH, self.kH, self.padH, self.dH) - end - - inputTensor.THNN.SpatialFullConvolution_accGradParameters( - inputTensor:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - THNN.optionalTensor(self.gradBias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - adjW, adjH, - scale - ) -end - -function SpatialFullConvolution:type(type, tensorCache) - self.finput = self.finput and torch.Tensor() - self.fgradInput = self.fgradInput and torch.Tensor() - return parent.type(self, type, tensorCache) -end - -function SpatialFullConvolution:__tostring__() - local s = string.format('%s(%d -> %d, %dx%d', torch.type(self), - self.nInputPlane, self.nOutputPlane, self.kW, self.kH) - if self.dW ~= 1 or self.dH ~= 1 or self.padW ~= 0 or self.padH ~= 0 then - s = s .. string.format(', %d,%d', self.dW, self.dH) - end - if (self.padW or self.padH) and (self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padW .. ',' .. self.padH - end - if (self.adjW or self.adjH) and (self.adjW ~= 0 or self.adjH ~= 0) then - s = s .. ', ' .. self.adjW .. ',' .. self.adjH - end - if self.bias then - return s .. ')' - else - return s .. 
') without bias' - end -end - -function SpatialFullConvolution:clearState() - nn.utils.clear(self, 'finput', 'fgradInput', '_input', '_gradOutput') - return parent.clearState(self) -end - diff --git a/contrib/lua-torch/nn/SpatialFullConvolutionMap.lua b/contrib/lua-torch/nn/SpatialFullConvolutionMap.lua deleted file mode 100644 index 008f5e7cf0..0000000000 --- a/contrib/lua-torch/nn/SpatialFullConvolutionMap.lua +++ /dev/null @@ -1,91 +0,0 @@ -local SpatialFullConvolutionMap, parent = torch.class('nn.SpatialFullConvolutionMap', 'nn.Module') - -function SpatialFullConvolutionMap:__init(conMatrix, kW, kH, dW, dH) - parent.__init(self) - - dW = dW or 1 - dH = dH or 1 - - self.kW = kW - self.kH = kH - self.dW = dW - self.dH = dH - self.connTable = conMatrix - self.nInputPlane = self.connTable:select(2,1):max() - self.nOutputPlane = self.connTable:select(2,2):max() - - self.weight = torch.Tensor(self.connTable:size(1), kH, kW) - self.gradWeight = torch.Tensor(self.connTable:size(1), kH, kW) - - self.bias = torch.Tensor(self.nOutputPlane) - self.gradBias = torch.Tensor(self.nOutputPlane) - - self:reset() -end - -function SpatialFullConvolutionMap:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - self.weight:apply(function() - return torch.uniform(-stdv, stdv) - end) - self.bias:apply(function() - return torch.uniform(-stdv, stdv) - end) - else - local ninp = torch.Tensor(self.nOutputPlane):zero() - for i=1,self.connTable:size(1) do ninp[self.connTable[i][2]] = ninp[self.connTable[i][2]]+1 end - for k=1,self.connTable:size(1) do - stdv = 1/math.sqrt(self.kW*self.kH*ninp[self.connTable[k][2]]) - self.weight:select(1,k):apply(function() return torch.uniform(-stdv,stdv) end) - end - for k=1,self.bias:size(1) do - stdv = 1/math.sqrt(self.kW*self.kH*ninp[k]) - self.bias[k] = torch.uniform(-stdv,stdv) - end - - end -end - -function SpatialFullConvolutionMap:updateOutput(input) - input.THNN.SpatialFullConvolutionMap_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - self.bias:cdata(), - self.connTable:cdata(), - self.nInputPlane, - self.nOutputPlane, - self.dW, self.dH - ) - return self.output -end - -function SpatialFullConvolutionMap:updateGradInput(input, gradOutput) - input.THNN.SpatialFullConvolutionMap_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.bias:cdata(), - self.connTable:cdata(), - self.nInputPlane, - self.nOutputPlane, - self.dW, self.dH - ) - return self.gradInput -end - -function SpatialFullConvolutionMap:accGradParameters(input, gradOutput, scale) - input.THNN.SpatialFullConvolutionMap_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - self.gradBias:cdata(), - self.connTable:cdata(), - self.nInputPlane, - self.nOutputPlane, - self.dW, self.dH, - scale or 1 - ) -end diff --git a/contrib/lua-torch/nn/SpatialLPPooling.lua b/contrib/lua-torch/nn/SpatialLPPooling.lua deleted file mode 100644 index 49a8493cf2..0000000000 --- a/contrib/lua-torch/nn/SpatialLPPooling.lua +++ /dev/null @@ -1,43 +0,0 @@ -local SpatialLPPooling, parent = torch.class('nn.SpatialLPPooling', 'nn.Sequential') - -function SpatialLPPooling:__init(nInputPlane, pnorm, kW, kH, dW, dH) - parent.__init(self) - - dW = dW or kW - dH = dH or kH - - self.kW = kW - self.kH = kH - self.dW = dW - self.dH = dH - - if pnorm == 2 then - self:add(nn.Square()) - else - self:add(nn.Power(pnorm)) - end - self:add(nn.SpatialAveragePooling(kW, kH, dW, dH)) - self:add(nn.MulConstant(kW*kH)) - if pnorm == 2 
then - self:add(nn.Sqrt()) - else - self:add(nn.Power(1/pnorm)) - end -end - --- the module is a Sequential: by default, it'll try to learn the parameters --- of the sub sampler: we avoid that by redefining its methods. -function SpatialLPPooling:reset() -end - -function SpatialLPPooling:accGradParameters() -end - -function SpatialLPPooling:accUpdateGradParameters() -end - -function SpatialLPPooling:zeroGradParameters() -end - -function SpatialLPPooling:updateParameters() -end diff --git a/contrib/lua-torch/nn/SpatialLogSoftMax.lua b/contrib/lua-torch/nn/SpatialLogSoftMax.lua deleted file mode 100644 index 9c81d49e1a..0000000000 --- a/contrib/lua-torch/nn/SpatialLogSoftMax.lua +++ /dev/null @@ -1,19 +0,0 @@ -local SpatialLogSoftMax = torch.class('nn.SpatialLogSoftMax', 'nn.Module') - -function SpatialLogSoftMax:updateOutput(input) - input.THNN.LogSoftMax_updateOutput( - input:cdata(), - self.output:cdata() - ) - return self.output -end - -function SpatialLogSoftMax:updateGradInput(input, gradOutput) - input.THNN.LogSoftMax_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.output:cdata() - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SpatialMaxPooling.lua b/contrib/lua-torch/nn/SpatialMaxPooling.lua deleted file mode 100644 index 5c865c631f..0000000000 --- a/contrib/lua-torch/nn/SpatialMaxPooling.lua +++ /dev/null @@ -1,94 +0,0 @@ -local SpatialMaxPooling, parent = torch.class('nn.SpatialMaxPooling', 'nn.Module') - -function SpatialMaxPooling:__init(kW, kH, dW, dH, padW, padH) - parent.__init(self) - - dW = dW or kW - dH = dH or kH - - self.kW = kW - self.kH = kH - self.dW = dW - self.dH = dH - - self.padW = padW or 0 - self.padH = padH or 0 - - self.ceil_mode = false - self.indices = torch.LongTensor() -end - -function SpatialMaxPooling:ceil() - self.ceil_mode = true - return self -end - -function SpatialMaxPooling:floor() - self.ceil_mode = false - return self -end - -function SpatialMaxPooling:updateOutput(input) - self.indices = self.indices or torch.LongTensor() - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.indices = torch.CudaLongTensor and self.indices:cudaLong() or self.indices - else - self.indices = self.indices:long() - end - - local dims = input:dim() - self.iheight = input:size(dims-1) - self.iwidth = input:size(dims) - - -- backward compatibility - self.ceil_mode = self.ceil_mode or false - self.padW = self.padW or 0 - self.padH = self.padH or 0 - input.THNN.SpatialMaxPooling_updateOutput( - input:cdata(), - self.output:cdata(), - self.indices:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.ceil_mode - ) - return self.output -end - -function SpatialMaxPooling:updateGradInput(input, gradOutput) - input.THNN.SpatialMaxPooling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.indices:cdata(), - self.kW, self.kH, - self.dW, self.dH, - self.padW, self.padH, - self.ceil_mode - ) - return self.gradInput -end - --- for backward compat -function SpatialMaxPooling:empty() - self:clearState() -end - -function SpatialMaxPooling:__tostring__() - local s = string.format('%s(%dx%d, %d,%d', torch.type(self), - self.kW, self.kH, self.dW, self.dH) - if (self.padW or self.padH) and (self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padW .. ','.. self.padH - end - s = s .. 
')' - - return s -end - -function SpatialMaxPooling:clearState() - if self.indices then - self.indices:set() - end - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/SpatialMaxUnpooling.lua b/contrib/lua-torch/nn/SpatialMaxUnpooling.lua deleted file mode 100644 index 408bcc0521..0000000000 --- a/contrib/lua-torch/nn/SpatialMaxUnpooling.lua +++ /dev/null @@ -1,45 +0,0 @@ -local SpatialMaxUnpooling, parent = torch.class('nn.SpatialMaxUnpooling', 'nn.Module') - -function SpatialMaxUnpooling:__init(poolingModule) - parent.__init(self) - assert(torch.type(poolingModule)=='nn.SpatialMaxPooling', 'Argument must be a nn.SpatialMaxPooling module') - assert(poolingModule.kH==poolingModule.dH and poolingModule.kW==poolingModule.dW, "The size of pooling module's kernel must be equal to its stride") - self.pooling = poolingModule -end - -function SpatialMaxUnpooling:setParams() - self.indices = self.pooling.indices - self.oheight = self.pooling.iheight - self.owidth = self.pooling.iwidth -end - -function SpatialMaxUnpooling:updateOutput(input) - self:setParams() - input.THNN.SpatialMaxUnpooling_updateOutput( - input:cdata(), - self.output:cdata(), - self.indices:cdata(), - self.owidth, self.oheight - ) - return self.output -end - -function SpatialMaxUnpooling:updateGradInput(input, gradOutput) - self:setParams() - input.THNN.SpatialMaxUnpooling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.indices:cdata(), - self.owidth, self.oheight - ) - return self.gradInput -end - -function SpatialMaxUnpooling:empty() - self:clearState() -end - -function SpatialMaxUnpooling:__tostring__() - return 'nn.SpatialMaxUnpooling associated to '..tostring(self.pooling) -end diff --git a/contrib/lua-torch/nn/SpatialReflectionPadding.lua b/contrib/lua-torch/nn/SpatialReflectionPadding.lua deleted file mode 100644 index 9ce4612ad6..0000000000 --- a/contrib/lua-torch/nn/SpatialReflectionPadding.lua +++ /dev/null @@ -1,51 +0,0 @@ -local SpatialReflectionPadding, parent = - torch.class('nn.SpatialReflectionPadding', 'nn.Module') - -function SpatialReflectionPadding:__init(pad_l, pad_r, pad_t, pad_b) - parent.__init(self) - self.pad_l = pad_l - self.pad_r = pad_r or self.pad_l - self.pad_t = pad_t or self.pad_l - self.pad_b = pad_b or self.pad_l -end - -function SpatialReflectionPadding:updateOutput(input) - if input:dim() == 3 or input:dim() == 4 then - input.THNN.SpatialReflectionPadding_updateOutput( - input:cdata(), self.output:cdata(), - self.pad_l, self.pad_r, self.pad_t, self.pad_b) - else - error('input must be 3 or 4-dimensional') - end - return self.output -end - -function SpatialReflectionPadding:updateGradInput(input, gradOutput) - if input:dim() == 3 and gradOutput:dim() == 3 then - assert(input:size(1) == gradOutput:size(1) - and input:size(2) + self.pad_t + self.pad_b == gradOutput:size(2) - and input:size(3) + self.pad_l + self.pad_r == gradOutput:size(3), - 'input and gradOutput must be compatible in size') - elseif input:dim() == 4 and gradOutput:dim() == 4 then - assert(input:size(1) == gradOutput:size(1) - and input:size(2) == gradOutput:size(2) - and input:size(3) + self.pad_t + self.pad_b == gradOutput:size(3) - and input:size(4) + self.pad_l + self.pad_r == gradOutput:size(4), - 'input and gradOutput must be compatible in size') - else - error( - [[input and gradOutput must be 3 or 4-dimensional - and have equal number of dimensions]] - ) - end - input.THNN.SpatialReflectionPadding_updateGradInput( - input:cdata(), gradOutput:cdata(), 
self.gradInput:cdata(), - self.pad_l, self.pad_r, self.pad_t, self.pad_b) - return self.gradInput -end - -function SpatialReflectionPadding:__tostring__() - return torch.type(self) .. - string.format('(l=%d, r=%d, t=%d, b=%d)', self.pad_l, self.pad_r, - self.pad_t, self.pad_b) -end diff --git a/contrib/lua-torch/nn/SpatialReplicationPadding.lua b/contrib/lua-torch/nn/SpatialReplicationPadding.lua deleted file mode 100644 index 429763f9bb..0000000000 --- a/contrib/lua-torch/nn/SpatialReplicationPadding.lua +++ /dev/null @@ -1,51 +0,0 @@ -local SpatialReplicationPadding, parent = - torch.class('nn.SpatialReplicationPadding', 'nn.Module') - -function SpatialReplicationPadding:__init(pad_l, pad_r, pad_t, pad_b) - parent.__init(self) - self.pad_l = pad_l - self.pad_r = pad_r or self.pad_l - self.pad_t = pad_t or self.pad_l - self.pad_b = pad_b or self.pad_l -end - -function SpatialReplicationPadding:updateOutput(input) - if input:dim() == 3 or input:dim() == 4 then - input.THNN.SpatialReplicationPadding_updateOutput( - input:cdata(), self.output:cdata(), - self.pad_l, self.pad_r, self.pad_t, self.pad_b) - else - error('input must be 3 or 4-dimensional') - end - return self.output -end - -function SpatialReplicationPadding:updateGradInput(input, gradOutput) - if input:dim() == 3 and gradOutput:dim() == 3 then - assert(input:size(1) == gradOutput:size(1) - and input:size(2) + self.pad_t + self.pad_b == gradOutput:size(2) - and input:size(3) + self.pad_l + self.pad_r == gradOutput:size(3), - 'input and gradOutput must be compatible in size') - elseif input:dim() == 4 and gradOutput:dim() == 4 then - assert(input:size(1) == gradOutput:size(1) - and input:size(2) == gradOutput:size(2) - and input:size(3) + self.pad_t + self.pad_b == gradOutput:size(3) - and input:size(4) + self.pad_l + self.pad_r == gradOutput:size(4), - 'input and gradOutput must be compatible in size') - else - error( - [[input and gradOutput must be 3 or 4-dimensional - and have equal number of dimensions]] - ) - end - input.THNN.SpatialReplicationPadding_updateGradInput( - input:cdata(), gradOutput:cdata(), self.gradInput:cdata(), - self.pad_l, self.pad_r, self.pad_t, self.pad_b) - return self.gradInput -end - -function SpatialReplicationPadding:__tostring__() - return torch.type(self) .. 
- string.format('(l=%d, r=%d, t=%d, b=%d)', self.pad_l, self.pad_r, - self.pad_t, self.pad_b) -end diff --git a/contrib/lua-torch/nn/SpatialSoftMax.lua b/contrib/lua-torch/nn/SpatialSoftMax.lua deleted file mode 100644 index 56f0b40e2a..0000000000 --- a/contrib/lua-torch/nn/SpatialSoftMax.lua +++ /dev/null @@ -1,19 +0,0 @@ -local SpatialSoftMax, _ = torch.class('nn.SpatialSoftMax', 'nn.Module') - -function SpatialSoftMax:updateOutput(input) - input.THNN.SoftMax_updateOutput( - input:cdata(), - self.output:cdata() - ) - return self.output -end - -function SpatialSoftMax:updateGradInput(input, gradOutput) - input.THNN.SoftMax_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.output:cdata() - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SpatialSubSampling.lua b/contrib/lua-torch/nn/SpatialSubSampling.lua deleted file mode 100644 index 4e3fb88817..0000000000 --- a/contrib/lua-torch/nn/SpatialSubSampling.lua +++ /dev/null @@ -1,79 +0,0 @@ -local SpatialSubSampling, parent = torch.class('nn.SpatialSubSampling', 'nn.Module') - -function SpatialSubSampling:__init(nInputPlane, kW, kH, dW, dH) - parent.__init(self) - - dW = dW or 1 - dH = dH or 1 - - self.nInputPlane = nInputPlane - self.kW = kW - self.kH = kH - self.dW = dW - self.dH = dH - - self.weight = torch.Tensor(nInputPlane) - self.bias = torch.Tensor(nInputPlane) - self.gradWeight = torch.Tensor(nInputPlane) - self.gradBias = torch.Tensor(nInputPlane) - - self:reset() -end - -function SpatialSubSampling:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1/math.sqrt(self.kW*self.kH) - end - if nn.oldSeed then - self.weight:apply(function() - return torch.uniform(-stdv, stdv) - end) - self.bias:apply(function() - return torch.uniform(-stdv, stdv) - end) - else - self.weight:uniform(-stdv, stdv) - self.bias:uniform(-stdv, stdv) - end -end - -function SpatialSubSampling:updateOutput(input) - input.THNN.SpatialSubSampling_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - self.bias:cdata(), - self.kW, self.kH, - self.dW, self.dH - ) - return self.output -end - -function SpatialSubSampling:updateGradInput(input, gradOutput) - if self.gradInput then - input.THNN.SpatialSubSampling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.kW, self.kH, - self.dW, self.dH - ) - return self.gradInput - end -end - -function SpatialSubSampling:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - input.THNN.SpatialSubSampling_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - self.gradBias:cdata(), - self.kW, self.kH, - self.dW, self.dH, - scale - ) -end diff --git a/contrib/lua-torch/nn/SpatialSubtractiveNormalization.lua b/contrib/lua-torch/nn/SpatialSubtractiveNormalization.lua deleted file mode 100644 index d430083e95..0000000000 --- a/contrib/lua-torch/nn/SpatialSubtractiveNormalization.lua +++ /dev/null @@ -1,115 +0,0 @@ -local SpatialSubtractiveNormalization, parent = torch.class('nn.SpatialSubtractiveNormalization','nn.Module') - -function SpatialSubtractiveNormalization:__init(nInputPlane, kernel) - parent.__init(self) - - -- get args - self.nInputPlane = nInputPlane or 1 - self.kernel = kernel or torch.Tensor(9,9):fill(1) - local kdim = self.kernel:nDimension() - - -- check args - if kdim ~= 2 and kdim ~= 1 then - error(' averaging kernel must be 2D or 1D') - end - if (self.kernel:size(1) % 2) == 0 or (kdim == 2 and (self.kernel:size(2) 
% 2) == 0) then - error(' averaging kernel must have ODD dimensions') - end - - -- normalize kernel - self.kernel:div(self.kernel:sum() * self.nInputPlane) - - -- padding values - local padH = math.floor(self.kernel:size(1)/2) - local padW = padH - if kdim == 2 then - padW = math.floor(self.kernel:size(2)/2) - end - - -- create convolutional mean extractor - self.meanestimator = nn.Sequential() - self.meanestimator:add(nn.SpatialZeroPadding(padW, padW, padH, padH)) - if kdim == 2 then - self.meanestimator:add(nn.SpatialConvolution(self.nInputPlane, 1, self.kernel:size(2), self.kernel:size(1))) - else - self.meanestimator:add(nn.SpatialConvolutionMap(nn.tables.oneToOne(self.nInputPlane), self.kernel:size(1), 1)) - self.meanestimator:add(nn.SpatialConvolution(self.nInputPlane, 1, 1, self.kernel:size(1))) - end - self.meanestimator:add(nn.Replicate(self.nInputPlane,1,3)) - - -- set kernel and bias - if kdim == 2 then - for i = 1,self.nInputPlane do - self.meanestimator.modules[2].weight[1][i] = self.kernel - end - self.meanestimator.modules[2].bias:zero() - else - for i = 1,self.nInputPlane do - self.meanestimator.modules[2].weight[i]:copy(self.kernel) - self.meanestimator.modules[3].weight[1][i]:copy(self.kernel) - end - self.meanestimator.modules[2].bias:zero() - self.meanestimator.modules[3].bias:zero() - end - - -- other operation - self.subtractor = nn.CSubTable() - self.divider = nn.CDivTable() - - -- coefficient array, to adjust side effects - self.coef = torch.Tensor(1,1,1) -end - -function SpatialSubtractiveNormalization:updateOutput(input) - -- compute side coefficients - local dim = input:dim() - if input:dim()+1 ~= self.coef:dim() or (input:size(dim) ~= self.coef:size(dim)) or (input:size(dim-1) ~= self.coef:size(dim-1)) then - self.ones = self.ones or input.new() - self._coef = self._coef or self.coef.new() - if dim == 4 then - -- batch mode - self.ones:resizeAs(input[1]):fill(1) - local coef = self.meanestimator:updateOutput(self.ones) - self._coef:resizeAs(coef):copy(coef) -- make contiguous for view - local size = coef:size():totable() - table.insert(size,1,input:size(1)) - self.coef = self._coef:view(1,table.unpack(self._coef:size():totable())):expand(table.unpack(size)) - else - self.ones:resizeAs(input):fill(1) - local coef = self.meanestimator:updateOutput(self.ones) - self._coef:resizeAs(coef):copy(coef) -- copy meanestimator.output as it will be used below - self.coef = self._coef - end - - end - - -- compute mean - self.localsums = self.meanestimator:updateOutput(input) - self.adjustedsums = self.divider:updateOutput{self.localsums, self.coef} - self.output = self.subtractor:updateOutput{input, self.adjustedsums} - - -- done - return self.output -end - -function SpatialSubtractiveNormalization:updateGradInput(input, gradOutput) - -- resize grad - self.gradInput:resizeAs(input):zero() - - -- backprop through all modules - local gradsub = self.subtractor:updateGradInput({input, self.adjustedsums}, gradOutput) - local graddiv = self.divider:updateGradInput({self.localsums, self.coef}, gradsub[2]) - local size = self.meanestimator:updateGradInput(input, graddiv[1]):size() - self.gradInput:add(self.meanestimator:updateGradInput(input, graddiv[1])) - self.gradInput:add(gradsub[1]) - - -- done - return self.gradInput -end - -function SpatialSubtractiveNormalization:clearState() - if self.ones then self.ones:set() end - if self._coef then self._coef:set() end - self.meanestimator:clearState() - return parent.clearState(self) -end diff --git 
a/contrib/lua-torch/nn/SpatialUpSamplingBilinear.lua b/contrib/lua-torch/nn/SpatialUpSamplingBilinear.lua deleted file mode 100644 index 12e1ce8f20..0000000000 --- a/contrib/lua-torch/nn/SpatialUpSamplingBilinear.lua +++ /dev/null @@ -1,139 +0,0 @@ -require 'nn.THNN' -local SpatialUpSamplingBilinear, parent = - torch.class('nn.SpatialUpSamplingBilinear', 'nn.Module') - ---[[ -Applies a 2D bilinear up-sampling over an input image composed of several -input planes. - -The Y and X dimensions are assumed to be the last 2 tensor dimensions. For -instance, if the tensor is 4D, then dim 3 is the y dimension and dim 4 is the x. - -scale_factor is assumed to be a positive integer. -owidth = (width-1)*(scale_factor-1) + width -oheight = (height-1)*(scale_factor-1) + height - -Alternatively, owidth and oheight can be directly provided as input. ---]] - -function SpatialUpSamplingBilinear:__init(params) - parent.__init(self) - - self.owidth, self.oheight, self.scale_factor = nil, nil, nil - if torch.type(params) == 'table' then - self.owidth, self.oheight = params.owidth, params.oheight - else - self.scale_factor = params - if self.scale_factor < 1 then - error('scale_factor must be greater than 1') - end - if math.floor(self.scale_factor) ~= self.scale_factor then - error('scale_factor must be integer') - end - end - self.inputSize = torch.LongStorage(4) - self.outputSize = torch.LongStorage(4) -end - -local function makeContiguous(self, input, gradOutput) - if not input:isContiguous() then - self._input = self._input or input.new() - self._input:resizeAs(input):copy(input) - input = self._input - end - if gradOutput then - if not gradOutput:isContiguous() then - self._gradOutput = self._gradOutput or gradOutput.new() - self._gradOutput:resizeAs(gradOutput):copy(gradOutput) - gradOutput = self._gradOutput - end - end - return input, gradOutput -end - -function SpatialUpSamplingBilinear:setSize(input) - local xdim = input:dim() - local ydim = xdim - 1 - for i = 1, input:dim() do - self.inputSize[i] = input:size(i) - self.outputSize[i] = input:size(i) - end - if self.scale_factor ~= nil then - self.outputSize[ydim] = self.outputSize[ydim] * self.scale_factor - self.outputSize[xdim] = self.outputSize[xdim] * self.scale_factor - else - self.outputSize[ydim] = self.oheight - self.outputSize[xdim] = self.owidth - end -end - -function SpatialUpSamplingBilinear:updateOutput(input) - assert(input:dim() == 4 or input:dim()==3, - 'SpatialUpSamplingBilinear only supports 3D or 4D tensors' ) - input = makeContiguous(self, input) - local inputwas3D = false - if input:dim() == 3 then - input=input:view(-1, input:size(1), input:size(2), input:size(3)) - inputwas3D = true - end - local xdim = input:dim() - local ydim = xdim - 1 - self:setSize(input) - input.THNN.SpatialUpSamplingBilinear_updateOutput( - input:cdata(), - self.output:cdata(), - self.outputSize[ydim], - self.outputSize[xdim] - ) - if inputwas3D then - input = input:squeeze(1) - self.output = self.output:squeeze(1) - end - return self.output -end - -function SpatialUpSamplingBilinear:updateGradInput(input, gradOutput) - assert(input:dim() == 4 or input:dim()==3, - 'SpatialUpSamplingBilinear only support 3D or 4D tensors' ) - assert(input:dim() == gradOutput:dim(), - 'Input and gradOutput should be of same dimension' ) - input, gradOutput = makeContiguous(self, input, gradOutput) - local inputwas3D = false - if input:dim() == 3 then - input = input:view(-1, input:size(1), input:size(2), input:size(3)) - gradOutput = gradOutput:view(-1, gradOutput:size(1), 
gradOutput:size(2), - gradOutput:size(3)) - inputwas3D = true - end - local xdim = input:dim() - local ydim = xdim - 1 - self.gradInput:resizeAs(input) - input.THNN.SpatialUpSamplingBilinear_updateGradInput( - gradOutput:cdata(), - self.gradInput:cdata(), - input:size(1), - input:size(2), - input:size(3), - input:size(4), - self.outputSize[ydim], - self.outputSize[xdim] - ) - if inputwas3D then - input = input:squeeze(1) - gradOutput = gradOutput:squeeze(1) - self.gradInput = self.gradInput:squeeze(1) - end - return self.gradInput -end - - -function SpatialUpSamplingBilinear:__tostring__() - local s - if self.scale_factor ~= nil then - s = string.format('%s(%d)', torch.type(self), self.scale_factor) - else - s = string.format('%s(%d, %d)', - torch.type(self), self.oheight, self.owidth) - end - return s -end diff --git a/contrib/lua-torch/nn/SpatialUpSamplingNearest.lua b/contrib/lua-torch/nn/SpatialUpSamplingNearest.lua deleted file mode 100644 index 362ae73a31..0000000000 --- a/contrib/lua-torch/nn/SpatialUpSamplingNearest.lua +++ /dev/null @@ -1,59 +0,0 @@ -local SpatialUpSamplingNearest, parent = torch.class('nn.SpatialUpSamplingNearest', 'nn.Module') - ---[[ -Applies a 2D up-sampling over an input image composed of several input planes. - -The upsampling is done using the simple nearest neighbor technique. - -The Y and X dimensions are assumed to be the last 2 tensor dimensions. For -instance, if the tensor is 4D, then dim 3 is the y dimension and dim 4 is the x. - -owidth = width*scale_factor -oheight = height*scale_factor ---]] - -function SpatialUpSamplingNearest:__init(scale) - parent.__init(self) - - self.scale_factor = scale - if self.scale_factor < 1 then - error('scale_factor must be greater than 1') - end - if math.floor(self.scale_factor) ~= self.scale_factor then - error('scale_factor must be integer') - end - self.inputSize = torch.LongStorage(4) - self.outputSize = torch.LongStorage(4) -end - -function SpatialUpSamplingNearest:updateOutput(input) - if input:dim() ~= 4 and input:dim() ~= 3 then - error('SpatialUpSamplingNearest only support 3D or 4D tensors') - end - -- Copy the input size - local xdim = input:dim() - local ydim = input:dim() - 1 - for i = 1, input:dim() do - self.inputSize[i] = input:size(i) - self.outputSize[i] = input:size(i) - end - self.outputSize[ydim] = self.outputSize[ydim] * self.scale_factor - self.outputSize[xdim] = self.outputSize[xdim] * self.scale_factor - input.THNN.SpatialUpSamplingNearest_updateOutput( - input:cdata(), - self.output:cdata(), - self.scale_factor - ) - return self.output -end - -function SpatialUpSamplingNearest:updateGradInput(input, gradOutput) - self.gradInput:resizeAs(input) - input.THNN.SpatialUpSamplingNearest_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.scale_factor - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/SpatialZeroPadding.lua b/contrib/lua-torch/nn/SpatialZeroPadding.lua deleted file mode 100644 index f19925841b..0000000000 --- a/contrib/lua-torch/nn/SpatialZeroPadding.lua +++ /dev/null @@ -1,104 +0,0 @@ -local SpatialZeroPadding, parent = torch.class('nn.SpatialZeroPadding', 'nn.Module') - -function SpatialZeroPadding:__init(pad_l, pad_r, pad_t, pad_b) - parent.__init(self) - self.pad_l = pad_l - self.pad_r = pad_r or self.pad_l - self.pad_t = pad_t or self.pad_l - self.pad_b = pad_b or self.pad_l -end - -function SpatialZeroPadding:updateOutput(input) - if input:dim() == 3 then - -- sizes - local h = input:size(2) + self.pad_t + self.pad_b - 
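-- Editor's illustrative sketch (not part of the original patch); assumes a
-- working torch/nn install. In SpatialZeroPadding, positive pads grow the
-- map with zeros and negative pads crop it, which is exactly what the
-- narrow() calls below implement; sizes follow h = iH + pad_t + pad_b
-- (and likewise for the width).
local nn = require 'nn'

local pad = nn.SpatialZeroPadding(2, 2, 1, 1)   -- pad_l, pad_r, pad_t, pad_b
local x = torch.rand(3, 5, 5)
print(pad:forward(x):size())    -- 3x7x9: h = 5 + 1 + 1, w = 5 + 2 + 2

local crop = nn.SpatialZeroPadding(-1, -1, 0, 0)
print(crop:forward(x):size())   -- 3x5x3: w = 5 - 1 - 1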
-      local w = input:size(3) + self.pad_l + self.pad_r
-      if w < 1 or h < 1 then error('input is too small') end
-      self.output:resize(input:size(1), h, w)
-      self.output:zero()
-      -- crop input if necessary
-      local c_input = input
-      if self.pad_t < 0 then c_input = c_input:narrow(2, 1 - self.pad_t, c_input:size(2) + self.pad_t) end
-      if self.pad_b < 0 then c_input = c_input:narrow(2, 1, c_input:size(2) + self.pad_b) end
-      if self.pad_l < 0 then c_input = c_input:narrow(3, 1 - self.pad_l, c_input:size(3) + self.pad_l) end
-      if self.pad_r < 0 then c_input = c_input:narrow(3, 1, c_input:size(3) + self.pad_r) end
-      -- crop output if necessary
-      local c_output = self.output
-      if self.pad_t > 0 then c_output = c_output:narrow(2, 1 + self.pad_t, c_output:size(2) - self.pad_t) end
-      if self.pad_b > 0 then c_output = c_output:narrow(2, 1, c_output:size(2) - self.pad_b) end
-      if self.pad_l > 0 then c_output = c_output:narrow(3, 1 + self.pad_l, c_output:size(3) - self.pad_l) end
-      if self.pad_r > 0 then c_output = c_output:narrow(3, 1, c_output:size(3) - self.pad_r) end
-      -- copy input to output
-      c_output:copy(c_input)
-   elseif input:dim() == 4 then
-      -- sizes
-      local h = input:size(3) + self.pad_t + self.pad_b
-      local w = input:size(4) + self.pad_l + self.pad_r
-      if w < 1 or h < 1 then error('input is too small') end
-      self.output:resize(input:size(1), input:size(2), h, w)
-      self.output:zero()
-      -- crop input if necessary
-      local c_input = input
-      if self.pad_t < 0 then c_input = c_input:narrow(3, 1 - self.pad_t, c_input:size(3) + self.pad_t) end
-      if self.pad_b < 0 then c_input = c_input:narrow(3, 1, c_input:size(3) + self.pad_b) end
-      if self.pad_l < 0 then c_input = c_input:narrow(4, 1 - self.pad_l, c_input:size(4) + self.pad_l) end
-      if self.pad_r < 0 then c_input = c_input:narrow(4, 1, c_input:size(4) + self.pad_r) end
-      -- crop output if necessary
-      local c_output = self.output
-      if self.pad_t > 0 then c_output = c_output:narrow(3, 1 + self.pad_t, c_output:size(3) - self.pad_t) end
-      if self.pad_b > 0 then c_output = c_output:narrow(3, 1, c_output:size(3) - self.pad_b) end
-      if self.pad_l > 0 then c_output = c_output:narrow(4, 1 + self.pad_l, c_output:size(4) - self.pad_l) end
-      if self.pad_r > 0 then c_output = c_output:narrow(4, 1, c_output:size(4) - self.pad_r) end
-      -- copy input to output
-      c_output:copy(c_input)
-   else
-      error('input must be 3 or 4-dimensional')
-   end
-   return self.output
-end
-
-function SpatialZeroPadding:updateGradInput(input, gradOutput)
-   if input:dim() == 3 then
-      self.gradInput:resizeAs(input):zero()
-      -- crop gradInput if necessary
-      local cg_input = self.gradInput
-      if self.pad_t < 0 then cg_input = cg_input:narrow(2, 1 - self.pad_t, cg_input:size(2) + self.pad_t) end
-      if self.pad_b < 0 then cg_input = cg_input:narrow(2, 1, cg_input:size(2) + self.pad_b) end
-      if self.pad_l < 0 then cg_input = cg_input:narrow(3, 1 - self.pad_l, cg_input:size(3) + self.pad_l) end
-      if self.pad_r < 0 then cg_input = cg_input:narrow(3, 1, cg_input:size(3) + self.pad_r) end
-      -- crop gradOutput if necessary
-      local cg_output = gradOutput
-      if self.pad_t > 0 then cg_output = cg_output:narrow(2, 1 + self.pad_t, cg_output:size(2) - self.pad_t) end
-      if self.pad_b > 0 then cg_output = cg_output:narrow(2, 1, cg_output:size(2) - self.pad_b) end
-      if self.pad_l > 0 then cg_output = cg_output:narrow(3, 1 + self.pad_l, cg_output:size(3) - self.pad_l) end
-      if self.pad_r > 0 then cg_output = cg_output:narrow(3, 1, cg_output:size(3) - self.pad_r) end
-      -- copy gradOutput to gradInput
cg_input:copy(cg_output) - elseif input:dim() == 4 then - self.gradInput:resizeAs(input):zero() - -- crop gradInput if necessary - local cg_input = self.gradInput - if self.pad_t < 0 then cg_input = cg_input:narrow(3, 1 - self.pad_t, cg_input:size(3) + self.pad_t) end - if self.pad_b < 0 then cg_input = cg_input:narrow(3, 1, cg_input:size(3) + self.pad_b) end - if self.pad_l < 0 then cg_input = cg_input:narrow(4, 1 - self.pad_l, cg_input:size(4) + self.pad_l) end - if self.pad_r < 0 then cg_input = cg_input:narrow(4, 1, cg_input:size(4) + self.pad_r) end - -- crop gradOutout if necessary - local cg_output = gradOutput - if self.pad_t > 0 then cg_output = cg_output:narrow(3, 1 + self.pad_t, cg_output:size(3) - self.pad_t) end - if self.pad_b > 0 then cg_output = cg_output:narrow(3, 1, cg_output:size(3) - self.pad_b) end - if self.pad_l > 0 then cg_output = cg_output:narrow(4, 1 + self.pad_l, cg_output:size(4) - self.pad_l) end - if self.pad_r > 0 then cg_output = cg_output:narrow(4, 1, cg_output:size(4) - self.pad_r) end - -- copy gradOuput to gradInput - cg_input:copy(cg_output) - else - error('input must be 3 or 4-dimensional') - end - return self.gradInput -end - - -function SpatialZeroPadding:__tostring__() - return torch.type(self) .. - string.format('(l=%d, r=%d, t=%d, b=%d)', self.pad_l, self.pad_r, - self.pad_t, self.pad_b) -end diff --git a/contrib/lua-torch/nn/SplitTable.lua b/contrib/lua-torch/nn/SplitTable.lua deleted file mode 100644 index 7c4f968e6a..0000000000 --- a/contrib/lua-torch/nn/SplitTable.lua +++ /dev/null @@ -1,43 +0,0 @@ -local SplitTable, parent = torch.class('nn.SplitTable', 'nn.Module') - -function SplitTable:__init(dimension, nInputDims) - parent.__init(self) - self.dimension = dimension - self.nInputDims = nInputDims -end - -function SplitTable:_getPositiveDimension(input) - local dimension = self.dimension - if dimension < 0 then - dimension = input:dim() + dimension + 1 - elseif self.nInputDims and input:dim()==(self.nInputDims+1) then - dimension = dimension + 1 - end - return dimension -end - -function SplitTable:updateOutput(input) - local dimension = self:_getPositiveDimension(input) - local slices = input:size(dimension) - - local currentOutput= {} - for i=1,slices do - currentOutput[#currentOutput+1] = input:select(dimension,i) - end - self.output = currentOutput - return self.output -end - -function SplitTable:updateGradInput(input, gradOutput) - local dimension = self:_getPositiveDimension(input) - local slices = input:size(dimension) - if self.gradInput then - self.gradInput:resizeAs(input) - - for i=1,slices do - local currentGradInput = gradOutput[i]; - self.gradInput:select(dimension,i):copy(currentGradInput) - end - end - return self.gradInput -end diff --git a/contrib/lua-torch/nn/Sqrt.lua b/contrib/lua-torch/nn/Sqrt.lua deleted file mode 100644 index df354a1754..0000000000 --- a/contrib/lua-torch/nn/Sqrt.lua +++ /dev/null @@ -1,26 +0,0 @@ -local Sqrt, parent = torch.class('nn.Sqrt','nn.Module') - -function Sqrt:__init(b) - parent.__init(self) - self.eps = b or 0 -end - -function Sqrt:updateOutput(input) - self.eps = self.eps or 0 - input.THNN.Sqrt_updateOutput( - input:cdata(), - self.output:cdata(), - self.eps - ) - return self.output -end - -function Sqrt:updateGradInput(input, gradOutput) - input.THNN.Sqrt_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.output:cdata() - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/Square.lua b/contrib/lua-torch/nn/Square.lua deleted file mode 
100644 index a6292afb93..0000000000 --- a/contrib/lua-torch/nn/Square.lua +++ /dev/null @@ -1,22 +0,0 @@ -local Square, parent = torch.class('nn.Square', 'nn.Module') - -function Square:__init(args) - parent.__init(self) -end - -function Square:updateOutput(input) - input.THNN.Square_updateOutput( - input:cdata(), - self.output:cdata() - ) - return self.output -end - -function Square:updateGradInput(input, gradOutput) - input.THNN.Square_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata() - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/Squeeze.lua b/contrib/lua-torch/nn/Squeeze.lua deleted file mode 100644 index 7d204a19d2..0000000000 --- a/contrib/lua-torch/nn/Squeeze.lua +++ /dev/null @@ -1,40 +0,0 @@ -local Squeeze, parent = torch.class('nn.Squeeze', 'nn.Module') - -function Squeeze:__init(dim, numInputDims) - parent.__init(self) - self.dim = dim - self:setNumInputDims(numInputDims) -end - -function Squeeze:setNumInputDims(numInputDims) - self.numInputDims = numInputDims - return self -end - -function Squeeze:updateOutput(input) - assert(input and torch.isTensor(input), 'Squeeze only works on tensors') - local dim = self.dim - local addone = false - if self.numInputDims and input:dim()==(self.numInputDims+1) then - if dim then - dim = dim + 1 - elseif input:size(1) == 1 then - addone = true -- in case of minibatch of size 1. - end - end - self.output:set(dim and input:squeeze(dim) or input:squeeze()) - if addone then - local s = self.output:size():totable{} - table.insert(s, 1, 1) - self.output:set(self.output:view(torch.LongStorage(s))) - end - return self.output -end - -function Squeeze:updateGradInput(input, gradOutput) - assert(input and torch.isTensor(input), 'Squeeze only works on tensors') - assert(gradOutput and torch.isTensor(gradOutput), 'Squeeze only works on tensors') - assert(input:nElement() == gradOutput:nElement()) - self.gradInput:set(gradOutput:view(input:size())) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/StochasticGradient.lua b/contrib/lua-torch/nn/StochasticGradient.lua deleted file mode 100644 index dc80be1b15..0000000000 --- a/contrib/lua-torch/nn/StochasticGradient.lua +++ /dev/null @@ -1,65 +0,0 @@ -local StochasticGradient = torch.class('nn.StochasticGradient') - -function StochasticGradient:__init(module, criterion) - self.learningRate = 0.01 - self.learningRateDecay = 0 - self.maxIteration = 25 - self.shuffleIndices = true - self.module = module - self.criterion = criterion - self.verbose = true - self.logger = function(s) - print(s) - end -end - -function StochasticGradient:train(dataset) - local iteration = 1 - local currentLearningRate = self.learningRate - local module = self.module - local criterion = self.criterion - - local shuffledIndices = torch.randperm(dataset:size(), 'torch.LongTensor') - if not self.shuffleIndices then - for t = 1,dataset:size() do - shuffledIndices[t] = t - end - end - - self.logger("# StochasticGradient: training") - - while true do - local currentError = 0 - for t = 1,dataset:size() do - local example = dataset[shuffledIndices[t]] - local input = example[1] - local target = example[2] - - currentError = currentError + criterion:forward(module:forward(input), target) - - module:updateGradInput(input, criterion:updateGradInput(module.output, target)) - module:accUpdateGradParameters(input, criterion.gradInput, currentLearningRate) - - if self.hookExample then - self.hookExample(self, example) - end - end - - currentError = currentError / dataset:size() - - if 
self.hookIteration then - self.hookIteration(self, iteration, currentError) - end - - if self.verbose then - self.logger("# current error = " .. currentError) - end - iteration = iteration + 1 - currentLearningRate = self.learningRate/(1+iteration*self.learningRateDecay) - if self.maxIteration > 0 and iteration > self.maxIteration then - self.logger("# StochasticGradient: you have reached the maximum number of iterations") - self.logger("# training error = " .. currentError) - break - end - end -end diff --git a/contrib/lua-torch/nn/Sum.lua b/contrib/lua-torch/nn/Sum.lua deleted file mode 100644 index 7fe8a1ab8a..0000000000 --- a/contrib/lua-torch/nn/Sum.lua +++ /dev/null @@ -1,67 +0,0 @@ -local Sum, parent = torch.class('nn.Sum', 'nn.Module') - -function Sum:__init(dimension, nInputDims, sizeAverage, squeeze) - parent.__init(self) - self.dimension = dimension or 1 - -- do not assign default value to nInputDims or it will break backward compatibility - self.nInputDims = nInputDims - self.sizeAverage = sizeAverage or false - if squeeze ~= nil then - assert(type(squeeze) == 'boolean', 'squeeze has to be true/false') - self.squeeze = squeeze - else - self.squeeze = true - end -end - -function Sum:_getPositiveDimension(input) - local dimension = self.dimension - if dimension < 0 then - dimension = input:dim() + dimension + 1 - elseif self.nInputDims and input:dim()==(self.nInputDims+1) then - dimension = dimension + 1 - end - assert(input:dim() >= dimension, "dimension exceeds input dimensions") - return dimension -end - -function Sum:updateOutput(input) - local dimension = self:_getPositiveDimension(input) - if type(self.output) == 'number' then - self.output = input.new() - end - self.output:sum(input, dimension) - if self.sizeAverage then - self.output:div(input:size(dimension)) - end - if (self.squeeze == nil or self.squeeze) and self.output:nDimension() > 1 then - self.output:set(self.output:select(dimension, 1)) - end - return self.output -end - -function Sum:updateGradInput(input, gradOutput) - local dimension = self:_getPositiveDimension(input) - -- zero-strides don't work with MKL/BLAS, so - -- don't set self.gradInput to zero-stride tensor. 
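-- Editor's illustrative sketch (not part of the original patch); assumes a
-- working torch/nn install. Dimension resolution in nn.Sum, as implemented
-- by _getPositiveDimension above: negative dims count from the end, and
-- nInputDims lets the same module handle both single samples and batches.
local nn = require 'nn'

local s = nn.Sum(1, 2)                          -- sum dim 1 of 2D samples
print(s:forward(torch.ones(4, 5)):size())       -- 5: dim 1 summed and squeezed
print(s:forward(torch.ones(3, 4, 5)):size())    -- 3x5: batch detected, sums dim 2

local last = nn.Sum(-1)                         -- resolved as input:dim() - 1 + 1
print(last:forward(torch.ones(4, 5)):size())    -- 4: last dim summed away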
- -- Instead, do a deepcopy - local size = input:size() - size[dimension] = 1 - if not gradOutput:isContiguous() then - self._gradOutput = self._gradOutput or gradOutput.new() - self._gradOutput:resizeAs(gradOutput):copy(gradOutput) - gradOutput = self._gradOutput - end - gradOutput = gradOutput:view(size) - self.gradInput:resizeAs(input) - self.gradInput:copy(gradOutput:expandAs(input)) - if self.sizeAverage then - self.gradInput:div(input:size(dimension)) - end - return self.gradInput -end - -function Sum:clearState() - nn.utils.clear(self, '_gradOutput') - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/THNN.lua b/contrib/lua-torch/nn/THNN.lua deleted file mode 100644 index 0848e9ed22..0000000000 --- a/contrib/lua-torch/nn/THNN.lua +++ /dev/null @@ -1,140 +0,0 @@ -local ffi = require 'ffi' - -local THNN = {} - - -local generic_THNN_h = require 'nn.THNN_h' --- strip all lines starting with # --- to remove preprocessor directives originally present --- in THNN.h -generic_THNN_h = generic_THNN_h:gsub("\n#[^\n]*", "") -generic_THNN_h = generic_THNN_h:gsub("^#[^\n]*\n", "") - --- THGenerator struct declaration copied from torch7/lib/TH/THRandom.h -local base_declarations = [[ -typedef void THNNState; - -typedef struct { - unsigned long the_initial_seed; - int left; - int seeded; - unsigned long next; - unsigned long state[624]; /* the array for the state vector 624 = _MERSENNE_STATE_N */ - double normal_x; - double normal_y; - double normal_rho; - int normal_is_valid; -} THGenerator; -]] - --- polyfill for LUA 5.1 -if not package.searchpath then - local sep = package.config:sub(1,1) - function package.searchpath(mod, path) - mod = mod:gsub('%.', sep) - for m in path:gmatch('[^;]+') do - local nm = m:gsub('?', mod) - local f = io.open(nm, 'r') - if f then - f:close() - return nm - end - end - end -end - --- load libTHNN -THNN.C = ffi.load(package.searchpath('libTHNN', package.cpath)) - -ffi.cdef(base_declarations) - --- expand macros, allow to use original lines from lib/THNN/generic/THNN.h -local preprocessed = string.gsub(generic_THNN_h, 'TH_API void THNN_%(([%a%d_]+)%)', 'void THNN_TYPE%1') - -local replacements = -{ - { - ['TYPE'] = 'Double', - ['accreal'] = 'double', - ['THTensor'] = 'THDoubleTensor', - ['THIndexTensor'] = 'THLongTensor', - ['THIntegerTensor'] = 'THIntTensor', - ['THIndex_t'] = 'long', - ['THInteger_t'] = 'int' - }, - { - ['TYPE'] = 'Float', - ['accreal'] = 'double', - ['THTensor'] = 'THFloatTensor', - ['THIndexTensor'] = 'THLongTensor', - ['THIntegerTensor'] = 'THIntTensor', - ['THIndex_t'] = 'long', - ['THInteger_t'] = 'int' - } -} - -for i=1,#replacements do - local r = replacements[i] - local s = preprocessed - for k,v in pairs(r) do - s = string.gsub(s, k, v) - end - ffi.cdef(s) -end - -THNN.NULL = ffi.NULL or nil - -function THNN.getState() - return ffi.NULL or nil -end - -function THNN.optionalTensor(t) - return t and t:cdata() or THNN.NULL -end - -local function extract_function_names(s) - local t = {} - for n in string.gmatch(s, 'TH_API void THNN_%(([%a%d_]+)%)') do - t[#t+1] = n - end - return t -end - -function THNN.bind(lib, base_names, type_name, state_getter) - local ftable = {} - local prefix = 'THNN_' .. type_name - for i,n in ipairs(base_names) do - -- use pcall since some libs might not support all functions (e.g. cunn) - local ok,v = pcall(function() return lib[prefix .. n] end) - if ok then - ftable[n] = function(...) v(state_getter(), ...) end -- implicitely add state - else - print('not found: ' .. prefix .. n .. 
v) - end - end - return ftable -end - --- build function table -local function_names = extract_function_names(generic_THNN_h) - -THNN.kernels = {} -THNN.kernels['torch.FloatTensor'] = THNN.bind(THNN.C, function_names, 'Float', THNN.getState) -THNN.kernels['torch.DoubleTensor'] = THNN.bind(THNN.C, function_names, 'Double', THNN.getState) - -torch.getmetatable('torch.FloatTensor').THNN = THNN.kernels['torch.FloatTensor'] -torch.getmetatable('torch.DoubleTensor').THNN = THNN.kernels['torch.DoubleTensor'] - -function THNN.runKernel(f, type, ...) - local ftable = THNN.kernels[type] - if not ftable then - error('Unsupported tensor type: '..type) - end - local f = ftable[f] - if not f then - error(string.format("Function '%s' not found for tensor type '%s'.", f, type)) - end - f(...) -end - -return THNN diff --git a/contrib/lua-torch/nn/Tanh.lua b/contrib/lua-torch/nn/Tanh.lua deleted file mode 100644 index fc42cbbfd0..0000000000 --- a/contrib/lua-torch/nn/Tanh.lua +++ /dev/null @@ -1,19 +0,0 @@ -local Tanh = torch.class('nn.Tanh', 'nn.Module') - -function Tanh:updateOutput(input) - input.THNN.Tanh_updateOutput( - input:cdata(), - self.output:cdata() - ) - return self.output -end - -function Tanh:updateGradInput(input, gradOutput) - input.THNN.Tanh_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.output:cdata() - ) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/TanhShrink.lua b/contrib/lua-torch/nn/TanhShrink.lua deleted file mode 100644 index 96df6c5b7d..0000000000 --- a/contrib/lua-torch/nn/TanhShrink.lua +++ /dev/null @@ -1,20 +0,0 @@ -local TanhShrink, parent = torch.class('nn.TanhShrink','nn.Module') - -function TanhShrink:__init() - parent.__init(self) - self.tanh = nn.Tanh() -end - -function TanhShrink:updateOutput(input) - local th = self.tanh:updateOutput(input) - self.output:resizeAs(input):copy(input) - self.output:add(-1,th) - return self.output -end - -function TanhShrink:updateGradInput(input, gradOutput) - local dth = self.tanh:updateGradInput(input,gradOutput) - self.gradInput:resizeAs(input):copy(gradOutput) - self.gradInput:add(-1,dth) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/TemporalConvolution.lua b/contrib/lua-torch/nn/TemporalConvolution.lua deleted file mode 100644 index 4b3a89eb6c..0000000000 --- a/contrib/lua-torch/nn/TemporalConvolution.lua +++ /dev/null @@ -1,73 +0,0 @@ -local TemporalConvolution, parent = torch.class('nn.TemporalConvolution', 'nn.Module') - -function TemporalConvolution:__init(inputFrameSize, outputFrameSize, kW, dW) - parent.__init(self) - - dW = dW or 1 - - self.inputFrameSize = inputFrameSize - self.outputFrameSize = outputFrameSize - self.kW = kW - self.dW = dW - - self.weight = torch.Tensor(outputFrameSize, inputFrameSize*kW) - self.bias = torch.Tensor(outputFrameSize) - self.gradWeight = torch.Tensor(outputFrameSize, inputFrameSize*kW) - self.gradBias = torch.Tensor(outputFrameSize) - - self:reset() -end - -function TemporalConvolution:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1/math.sqrt(self.kW*self.inputFrameSize) - end - if nn.oldSeed then - self.weight:apply(function() - return torch.uniform(-stdv, stdv) - end) - self.bias:apply(function() - return torch.uniform(-stdv, stdv) - end) - else - self.weight:uniform(-stdv, stdv) - self.bias:uniform(-stdv, stdv) - end -end - -function TemporalConvolution:updateOutput(input) - input.THNN.TemporalConvolution_updateOutput( - input:cdata(), self.output:cdata(), - self.weight:cdata(), 
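-- Editor's illustrative sketch (not part of the original patch); assumes a
-- working torch/nn install. TemporalConvolution above slides a 1D kernel
-- over frames: nOutputFrame = (nInputFrame - kW) / dW + 1.
local nn = require 'nn'

local conv = nn.TemporalConvolution(10, 20, 5, 2)  -- frame size 10 -> 20, kW 5, dW 2
local x = torch.rand(19, 10)                       -- 19 frames of 10 features
print(conv:forward(x):size())                      -- 8x20: (19 - 5)/2 + 1 = 8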
self.bias:cdata(), - self.kW, self.dW, - self.inputFrameSize, self.outputFrameSize - ) - return self.output -end - -function TemporalConvolution:updateGradInput(input, gradOutput) - if self.gradInput then - input.THNN.TemporalConvolution_updateGradInput( - input:cdata(), gradOutput:cdata(), - self.gradInput:cdata(), self.weight:cdata(), - self.kW, self.dW - ) - return self.gradInput - end -end - -function TemporalConvolution:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - input.THNN.TemporalConvolution_accGradParameters( - input:cdata(), gradOutput:cdata(), - self.gradWeight:cdata(), self.gradBias:cdata(), - self.kW, self.dW, scale - ) -end - -function TemporalConvolution:sharedAccUpdateGradParameters(input, gradOutput, lr) - -- we do not need to accumulate parameters when sharing: - self:defaultAccUpdateGradParameters(input, gradOutput, lr) -end diff --git a/contrib/lua-torch/nn/TemporalDynamicKMaxPooling.lua b/contrib/lua-torch/nn/TemporalDynamicKMaxPooling.lua deleted file mode 100644 index 644a0fa9cb..0000000000 --- a/contrib/lua-torch/nn/TemporalDynamicKMaxPooling.lua +++ /dev/null @@ -1,65 +0,0 @@ ---[[ - This file implements Dynamic K Max Pooling as described in the paper: - "A Convolutional Neural Network for Modelling Sentences" - by Nal Kalchbrenner, Edward Grefenstette, Phil Blunsom - - The operation is simply selecting the k highest values out of a sequence. - k can be a calculated value or pre-defined - - The value of k can be calulated as in the paper by using: - k_top as minK - (L-l)/L as factor - - Where: - k_top is the desired sequence length at the end of the convolution part, - L is the total number of layers, - l is this layers number -]] - -local TemporalDynamicKMaxPooling, parent = torch.class('nn.TemporalDynamicKMaxPooling', 'nn.Module') - -function TemporalDynamicKMaxPooling:__init(minK, factor) - parent.__init(self) - - self.minK = minK - self.factor = factor or 0 -end - -function TemporalDynamicKMaxPooling:updateOutput(input) - assert(input:dim() == 2 or input:dim() == 3, 'Only 2D or 3D(batch mode) accepted') - - local seqDim = input:dim()-1 - local k = math.max(self.minK, math.ceil(self.factor*input:size(seqDim))) - assert(input:size(seqDim) >= self.minK, 'Input sequence length (' .. input:size(seqDim) .. ') too small for desired k value (' .. k .. 
')') - - -- Sort input in descending order - local sorted, allIndices = input:sort(seqDim,true) - -- Reduce the indices to only include the top-k and return to original order by sorting - self.indices = allIndices:narrow(seqDim, 1, k):sort(seqDim) - - self.output = input:gather(seqDim, self.indices) - - return self.output -end - -function TemporalDynamicKMaxPooling:updateGradInput(input, gradOutput) - if self.gradInput then - local seqDim = input:dim()-1 - - self.gradInput:resizeAs(input) - self.gradInput:zero() - - -- Using the previously stored indices, add the gradOutputs to their respective - -- input indices in the self.gradInput buffer - local updateValues = self.gradInput:gather(seqDim, self.indices) - updateValues:add(gradOutput) - self.gradInput:scatter(seqDim, self.indices, updateValues) - - return self.gradInput - end -end - -function TemporalDynamicKMaxPooling:clearState() - nn.utils.clear(self, 'indices') - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/TemporalMaxPooling.lua b/contrib/lua-torch/nn/TemporalMaxPooling.lua deleted file mode 100644 index 894f4a99f0..0000000000 --- a/contrib/lua-torch/nn/TemporalMaxPooling.lua +++ /dev/null @@ -1,44 +0,0 @@ -local TemporalMaxPooling, parent = torch.class('nn.TemporalMaxPooling', 'nn.Module') - -function TemporalMaxPooling:__init(kW, dW) - parent.__init(self) - - dW = dW or kW - - self.kW = kW - self.dW = dW -end - -function TemporalMaxPooling:updateOutput(input) - self.indices = self.indices or torch.LongTensor() - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.indices = torch.CudaLongTensor and self.indices:cudaLong() or self.indices - else - self.indices = self.indices:long() - end - input.THNN.TemporalMaxPooling_updateOutput( - input:cdata(), self.output:cdata(), - self.indices:cdata(), self.kW, self.dW - ) - return self.output -end - -function TemporalMaxPooling:updateGradInput(input, gradOutput) - if self.gradInput then - input.THNN.TemporalMaxPooling_updateGradInput( - input:cdata(), gradOutput:cdata(), - self.gradInput:cdata(), self.indices:cdata(), - self.kW, self.dW - ) - return self.gradInput - end -end - -function TemporalMaxPooling:empty() - self:clearState() -end - -function TemporalMaxPooling:clearState() - if self.indices then self.indices:set() end - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/TemporalRowConvolution.lua b/contrib/lua-torch/nn/TemporalRowConvolution.lua deleted file mode 100644 index 7c9d6a2694..0000000000 --- a/contrib/lua-torch/nn/TemporalRowConvolution.lua +++ /dev/null @@ -1,120 +0,0 @@ -local THNN = require "nn.THNN" - -local TemporalRowConvolution, parent = torch.class("nn.TemporalRowConvolution", "nn.Module") - -function TemporalRowConvolution:__init(inputFrameSize, kW, dW, featFirst) - parent.__init(self) - - self.inputFrameSize = inputFrameSize - self.kW = kW - self.dW = dW or 1 - - self.weight = torch.Tensor(inputFrameSize, 1, kW) - self.bias = torch.Tensor(inputFrameSize) - self.gradWeight = torch.Tensor(inputFrameSize, 1, kW) - self.gradBias = torch.Tensor(inputFrameSize) - - -- Set to true for batch x inputFrameSize x nInputFrame - self.featFirst = featFirst and true or false - self:reset() -end - -function TemporalRowConvolution:noBias() - self.bias = nil - self.gradBias = nil - return self -end - -function TemporalRowConvolution:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1 / math.sqrt(self.kW * self.inputFrameSize) - end - self.weight:uniform(-stdv, stdv) - self.bias:uniform(-stdv, stdv) 
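-- Editor's illustrative sketch (not part of the original patch); assumes a
-- working torch/nn install. TemporalDynamicKMaxPooling above keeps the k
-- largest values per feature, in their original order, with
-- k = max(minK, ceil(factor * seqLen)).
local nn = require 'nn'

local kmax = nn.TemporalDynamicKMaxPooling(3, 0.5)
local x = torch.Tensor{{1, 9, 2, 8, 3, 7}}:t()  -- 6x1: sequence length 6, 1 feature
-- k = max(3, ceil(0.5 * 6)) = 3, so the top 3 values {9, 8, 7} survive:
print(kmax:forward(x))                          -- 9, 8, 7 (3x1, original order)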
-end - -function TemporalRowConvolution:updateOutput(input) - assert(input.THNN, torch.type(input)..".THNN backend not imported") - self.finput = self.finput or input.new() - self.fgradInput = self.fgradInput or input.new() - - input.THNN.TemporalRowConvolution_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - THNN.optionalTensor(self.bias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, - self.dW, - 0, -- would be self.padW - self.featFirst - ) - - return self.output -end - -function TemporalRowConvolution:updateGradInput(input, gradOutput) - assert(input.THNN, torch.type(input)..".THNN backend not imported") - - if self.gradInput then - input.THNN.TemporalRowConvolution_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, - self.dW, - 0, -- would be self.padW - self.featFirst - ) - return self.gradInput - end -end - -function TemporalRowConvolution:accGradParameters(input, gradOutput, scale) - assert(input.THNN, torch.type(input)..".THNN backend not imported") - - input.THNN.TemporalRowConvolution_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - THNN.optionalTensor(self.gradBias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kW, - self.dW, - 0, -- would be self.padW - self.featFirst, - scale or 1) -end - -function TemporalRowConvolution:type(type, tensorCache) - if self.finput then self.finput:set() end - if self.fgradInput then self.fgradInput:set() end - return parent.type(self, type, tensorCache) -end - -function TemporalRowConvolution:__tostring__() - local s = string.format("%s(%d, %d", torch.type(self), self.inputFrameSize, self.kW) - if self.dW ~= 1 then - s = s .. string.format(", %d", self.dW) - end - if self.padW and self.padW ~= 0 then -- currently padding is not supported - s = s .. ", " .. self.padW - end - if self.bias then - return s .. ")" - else - return s .. 
") without bias" - end -end - -function TemporalRowConvolution:clearState() - nn.utils.clear(self, "finput", "fgradInput", "_input", "_gradOutput") - return parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/TemporalSubSampling.lua b/contrib/lua-torch/nn/TemporalSubSampling.lua deleted file mode 100644 index e9287d63df..0000000000 --- a/contrib/lua-torch/nn/TemporalSubSampling.lua +++ /dev/null @@ -1,64 +0,0 @@ -local TemporalSubSampling, parent = torch.class('nn.TemporalSubSampling', 'nn.Module') - -function TemporalSubSampling:__init(inputFrameSize, kW, dW) - parent.__init(self) - - dW = dW or 1 - - self.inputFrameSize = inputFrameSize - self.kW = kW - self.dW = dW - - self.weight = torch.Tensor(inputFrameSize) - self.bias = torch.Tensor(inputFrameSize) - self.gradWeight = torch.Tensor(inputFrameSize) - self.gradBias = torch.Tensor(inputFrameSize) - - self:reset() -end - -function TemporalSubSampling:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1/math.sqrt(self.kW) - end - if nn.oldSeed then - self.weight:apply(function() - return torch.uniform(-stdv, stdv) - end) - self.bias:apply(function() - return torch.uniform(-stdv, stdv) - end) - else - self.weight:uniform(-stdv, stdv) - self.bias:uniform(-stdv, stdv) - end -end - -function TemporalSubSampling:updateOutput(input) - input.THNN.TemporalSubSampling_updateOutput( - input:cdata(), self.output:cdata(), - self.weight:cdata(), self.bias:cdata(), - self.kW, self.dW, self.inputFrameSize - ) - return self.output -end - -function TemporalSubSampling:updateGradInput(input, gradOutput) - if self.gradInput then - input.THNN.TemporalSubSampling_updateGradInput( - input:cdata(), gradOutput:cdata(), self.gradInput:cdata(), - self.weight:cdata(), self.kW, self.dW - ) - return self.gradInput - end -end - -function TemporalSubSampling:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - input.THNN.TemporalSubSampling_accGradParameters( - input:cdata(), gradOutput:cdata(), self.gradWeight:cdata(), - self.gradBias:cdata(), self.kW, self.dW, scale - ) -end diff --git a/contrib/lua-torch/nn/Threshold.lua b/contrib/lua-torch/nn/Threshold.lua deleted file mode 100644 index 6fdd264081..0000000000 --- a/contrib/lua-torch/nn/Threshold.lua +++ /dev/null @@ -1,51 +0,0 @@ -local Threshold, parent = torch.class('nn.Threshold','nn.Module') - -function Threshold:__init(th,v,ip) - parent.__init(self) - self.threshold = th or 1e-6 - self.val = v or 0 - if (th and type(th) ~= 'number') or (v and type(v) ~= 'number') then - error('nn.Threshold(threshold, value)') - end - -- default for inplace is false - self.inplace = ip or false - if (ip and type(ip) ~= 'boolean') then - error('in-place flag must be boolean') - end - self:validateParameters() -end - -function Threshold:updateOutput(input) - self:validateParameters() - input.THNN.Threshold_updateOutput( - input:cdata(), - self.output:cdata(), - self.threshold, - self.val, - self.inplace - ) - return self.output -end - -function Threshold:updateGradInput(input, gradOutput) - self:validateParameters() - input.THNN.Threshold_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.threshold, - self.val, - self.inplace - ) - return self.gradInput -end - -function Threshold:validateParameters() - self.inplace = self.inplace or false -- backwards compatibility pre inplace - if self.inplace then - if self.val > self.threshold then - error('in-place processing requires value (' .. self.val .. - ') not exceed threshold (' .. self.threshold .. 
')')
- end
- end
-end
diff --git a/contrib/lua-torch/nn/Transpose.lua b/contrib/lua-torch/nn/Transpose.lua
deleted file mode 100644
index cceb2b643e..0000000000
--- a/contrib/lua-torch/nn/Transpose.lua
+++ /dev/null
@@ -1,35 +0,0 @@
-local Transpose, parent = torch.class('nn.Transpose', 'nn.Module')
-
--- transpose dimensions:
--- n = nn.Transpose({1,4},{1,3})
--- will transpose dims 1 and 4, then 1 and 3...
-
-function Transpose:__init(...)
- parent.__init(self)
- self.permutations = {...}
- self.numInputDims = nil
-end
-
-function Transpose:setNumInputDims(numInputDims)
- self.numInputDims = numInputDims
- return self
-end
-
-function Transpose:updateOutput(input)
- local offset = self.numInputDims and input:nDimension()-self.numInputDims or 0
- for _,perm in ipairs(self.permutations) do
- input = input:transpose(perm[1]+offset,perm[2]+offset)
- end
- self.output:resizeAs(input):copy(input)
- return self.output
-end
-
-function Transpose:updateGradInput(input, gradOutput)
- for i = #self.permutations,1,-1 do
- local perm = self.permutations[i]
- local offset = self.numInputDims and input:nDimension()-self.numInputDims or 0
- gradOutput = gradOutput:transpose(perm[1]+offset,perm[2]+offset)
- end
- self.gradInput:resizeAs(gradOutput):copy(gradOutput)
- return self.gradInput
-end
diff --git a/contrib/lua-torch/nn/Unsqueeze.lua b/contrib/lua-torch/nn/Unsqueeze.lua
deleted file mode 100644
index 2e82a25a02..0000000000
--- a/contrib/lua-torch/nn/Unsqueeze.lua
+++ /dev/null
@@ -1,52 +0,0 @@
-local Unsqueeze, parent = torch.class('nn.Unsqueeze', 'nn.Module')
-
-local function _assertTensor(t)
- assert(torch.isTensor(t), "This module only works on tensor")
-end
-
-function Unsqueeze:__init(pos, numInputDims)
- parent.__init(self)
- self.pos = pos or error('the position to insert singleton dim not specified')
- self:setNumInputDims(numInputDims)
-end
-
-function Unsqueeze:setNumInputDims(numInputDims)
- self.numInputDims = numInputDims
- return self
-end
-
-function Unsqueeze:updateOutput(input)
- _assertTensor(input)
- local actualPos = self:_getActualPosition(input)
- nn.utils.addSingletonDimension(self.output, input, actualPos)
- return self.output
-end
-
-function Unsqueeze:updateGradInput(input, gradOutput)
- _assertTensor(input)
- _assertTensor(gradOutput)
- assert(input:nElement() == gradOutput:nElement())
-
- self.gradInput:view(gradOutput, input:size())
- return self.gradInput
-end
-
-function Unsqueeze:__tostring__()
- return torch.type(self)..'(dim ' .. self.pos .. ')'
-end
-
-function Unsqueeze:_getActualPosition(input)
- -- get valid dimension offset for batchMode (if any)
- local inputDim = input:dim() -- data batch dim
- self.numInputDims = self.numInputDims or inputDim -- feature map dim
- local offsetDim = inputDim - self.numInputDims
- assert(offsetDim >= 0, "input feature map dim (numInputDims) must be <= input:dim()")
-
- -- the actual position; clearer error message for batchMode (if any)
- local actualPos = self.pos + offsetDim
- assert(actualPos >= 1 and actualPos <= (inputDim + 1),
- ("Invalid position: %d. input:dim() is %d, input feature map dim (numInputDims) is %d.")
- :format(self.pos, inputDim, self.numInputDims)
- )
- return actualPos
-end
diff --git a/contrib/lua-torch/nn/View.lua b/contrib/lua-torch/nn/View.lua
deleted file mode 100644
index 542e57e163..0000000000
--- a/contrib/lua-torch/nn/View.lua
+++ /dev/null
@@ -1,96 +0,0 @@
-local View, parent = torch.class('nn.View', 'nn.Module')
-
-function View:resetSize(...)
- if select('#', ...)
== 1 and torch.typename(select(1, ...)) == 'torch.LongStorage' then - self.size = select(1, ...) - else - self.size = torch.LongStorage({...}) - end - - self.numElements = 1 - local inferdim = false - for i = 1,#self.size do - local szi = self.size[i] - if szi >= 0 then - self.numElements = self.numElements * self.size[i] - else - assert(szi == -1, 'size should be positive or -1') - assert(not inferdim, 'only one dimension can be at -1') - inferdim = true - end - end - - return self -end - -function View:__init(...) - parent.__init(self) - self:resetSize(...) - self.numInputDims = nil -end - -function View:setNumInputDims(numInputDims) - self.numInputDims = numInputDims - return self -end - -local function batchsize(input, size, numInputDims, numElements) - local ind = input:nDimension() - local isz = input:size() - local maxdim = numInputDims and numInputDims or ind - local ine = 1 - for i=ind,ind-maxdim+1,-1 do - ine = ine * isz[i] - end - - if ine % numElements ~= 0 then - error(string.format( - 'input view (%s) and desired view (%s) do not match', - table.concat(input:size():totable(), 'x'), - table.concat(size:totable(), 'x'))) - end - - -- the remainder is either the batch... - local bsz = ine / numElements - - -- ... or the missing size dim - for i=1,size:size() do - if size[i] == -1 then - bsz = 1 - break - end - end - - -- for dim over maxdim, it is definitively the batch - for i=ind-maxdim,1,-1 do - bsz = bsz * isz[i] - end - - -- special card - if bsz == 1 and (not numInputDims or input:nDimension() <= numInputDims) then - return - end - - return bsz -end - -function View:updateOutput(input) - self.output = self.output or input.new() - local bsz = batchsize(input, self.size, self.numInputDims, self.numElements) - if bsz then - self.output:view(input, bsz, table.unpack(self.size:totable())) - else - self.output:view(input, self.size) - end - return self.output -end - -function View:updateGradInput(input, gradOutput) - self.gradInput = self.gradInput or gradOutput.new() - self.gradInput:view(gradOutput, input:size()) - return self.gradInput -end - -function View:__tostring__() - return torch.type(self)..'('..table.concat(self.size:totable(), ', ')..')' -end diff --git a/contrib/lua-torch/nn/VolumetricAveragePooling.lua b/contrib/lua-torch/nn/VolumetricAveragePooling.lua deleted file mode 100644 index df6d2c4057..0000000000 --- a/contrib/lua-torch/nn/VolumetricAveragePooling.lua +++ /dev/null @@ -1,54 +0,0 @@ -local VolumetricAveragePooling, parent = torch.class( - 'nn.VolumetricAveragePooling', 'nn.Module') - -function VolumetricAveragePooling:__init(kT, kW, kH, dT, dW, dH) - parent.__init(self) - - dT = dT or kT - dW = dW or kW - dH = dH or kH - - self.kT = kT - self.kH = kH - self.kW = kW - self.dT = dT - self.dW = dW - self.dH = dH -end - -function VolumetricAveragePooling:updateOutput(input) - input.THNN.VolumetricAveragePooling_updateOutput( - input:cdata(), - self.output:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH - ) - return self.output -end - -function VolumetricAveragePooling:updateGradInput(input, gradOutput) - input.THNN.VolumetricAveragePooling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH - ) - return self.gradInput -end - -function VolumetricAveragePooling:empty() - return parent.clearState(self) -end - -function VolumetricAveragePooling:__tostring__() - local s = string.format('%s(%dx%dx%d, %d,%d,%d', torch.type(self), - self.kT, self.kW, self.kH, 
self.dT, self.dW, self.dH) - if (self.padT or self.padW or self.padH) and - (self.padT ~= 0 or self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padT.. ',' .. self.padW .. ','.. self.padH - end - s = s .. ')' - - return s -end diff --git a/contrib/lua-torch/nn/VolumetricBatchNormalization.lua b/contrib/lua-torch/nn/VolumetricBatchNormalization.lua deleted file mode 100644 index 6168a9245d..0000000000 --- a/contrib/lua-torch/nn/VolumetricBatchNormalization.lua +++ /dev/null @@ -1,4 +0,0 @@ -local BN, parent = torch.class('nn.VolumetricBatchNormalization', 'nn.BatchNormalization') - --- expected dimension of input -BN.nDim = 5 diff --git a/contrib/lua-torch/nn/VolumetricConvolution.lua b/contrib/lua-torch/nn/VolumetricConvolution.lua deleted file mode 100644 index 329609afff..0000000000 --- a/contrib/lua-torch/nn/VolumetricConvolution.lua +++ /dev/null @@ -1,169 +0,0 @@ -local THNN = require 'nn.THNN' -local VolumetricConvolution, parent = torch.class('nn.VolumetricConvolution', 'nn.Module') - -function VolumetricConvolution:__init(nInputPlane, nOutputPlane, kT, kW, kH, dT, dW, dH, padT, padW, padH) - parent.__init(self) - - dT = dT or 1 - dW = dW or 1 - dH = dH or 1 - - self.nInputPlane = nInputPlane - self.nOutputPlane = nOutputPlane - self.kT = kT - self.kW = kW - self.kH = kH - self.dT = dT - self.dW = dW - self.dH = dH - self.padT = padT or 0 - self.padW = padW or self.padT - self.padH = padH or self.padW - - self.weight = torch.Tensor(nOutputPlane, nInputPlane, kT, kH, kW) - self.bias = torch.Tensor(nOutputPlane) - self.gradWeight = torch.Tensor(nOutputPlane, nInputPlane, kT, kH, kW) - self.gradBias = torch.Tensor(nOutputPlane) - self:reset() -end - -function VolumetricConvolution:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1/math.sqrt(self.kT*self.kW*self.kH*self.nInputPlane) - end - if nn.oldSeed then - self.weight:apply(function() - return torch.uniform(-stdv, stdv) - end) - if self.bias then - self.bias:apply(function() - return torch.uniform(-stdv, stdv) - end) - end - else - self.weight:uniform(-stdv, stdv) - if self.bias then - self.bias:uniform(-stdv, stdv) - end - end -end - -function VolumetricConvolution:noBias() - self.bias = nil - self.gradBias = nil - return self -end - -function VolumetricConvolution:updateOutput(input) - self.finput = self.finput or input.new() - self.fgradInput = self.fgradInput or input.new() - if torch.typename(input):find('torch%.Cuda.*Tensor') then - input.THNN.VolumetricConvolution_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - THNN.optionalTensor(self.bias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH - ) - else - input.THNN.VolumetricConvolutionMM_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - THNN.optionalTensor(self.bias), - self.finput:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH - ) - end - return self.output -end - -function VolumetricConvolution:updateGradInput(input, gradOutput) - if torch.typename(input):find('torch%.Cuda.*Tensor') then - input.THNN.VolumetricConvolution_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.finput:cdata(), - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH - ) - return self.gradInput - else - if self.gradInput then - input.THNN.VolumetricConvolutionMM_updateGradInput( - input:cdata(), - gradOutput:cdata(), - 
self.gradInput:cdata(), - self.weight:cdata(), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH - ) - return self.gradInput - end - end -end - -function VolumetricConvolution:accGradParameters(input, gradOutput, scale) - if torch.typename(input):find('torch%.Cuda.*Tensor') then - input.THNN.VolumetricConvolution_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - THNN.optionalTensor(self.gradBias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - scale or 1 - ) - else - input.THNN.VolumetricConvolutionMM_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - THNN.optionalTensor(self.gradBias), - self.finput:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - scale or 1 - ) - end -end - -function VolumetricConvolution:type(type, tensorCache) - if self.finput then self.finput:set() end - if self.fgradInput then self.fgradInput:set() end - return parent.type(self, type, tensorCache) -end - -function VolumetricConvolution:clearState() - nn.utils.clear(self, 'finput', 'fgradInput', '_input', '_gradOutput') - return parent.clearState(self) -end - -function VolumetricConvolution:__tostring__() - local s = string.format('%s(%d -> %d, %dx%dx%d', torch.type(self), - self.nInputPlane, self.nOutputPlane, self.kT, self.kW, self.kH) - if self.dT ~= 1 or self.dW ~= 1 or self.dH ~= 1 or - self.padT ~= 0 or self.padW ~= 0 or self.padH ~= 0 then - s = s .. string.format(', %d,%d,%d', self.dT, self.dW, self.dH) - end - if (self.padT or self.padW or self.padH) and - (self.padT ~=0 or self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padT .. ',' .. self.padW .. ',' .. self.padH - end - return s .. 
')' -end diff --git a/contrib/lua-torch/nn/VolumetricDilatedConvolution.lua b/contrib/lua-torch/nn/VolumetricDilatedConvolution.lua deleted file mode 100644 index f1337ebaa7..0000000000 --- a/contrib/lua-torch/nn/VolumetricDilatedConvolution.lua +++ /dev/null @@ -1,84 +0,0 @@ -local THNN = require 'nn.THNN' -local VolumetricDilatedConvolution, parent = torch.class('nn.VolumetricDilatedConvolution', 'nn.VolumetricConvolution') - -function VolumetricDilatedConvolution:__init(nInputPlane, nOutputPlane, kT, kW, kH, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH) - parent.__init(self, nInputPlane, nOutputPlane, kT, kW, kH, dT, dW, dH, padT, padW, padH) - - self.dilationT = dilationT or 1 - self.dilationW = dilationW or 1 - self.dilationH = dilationH or 1 -end - -function VolumetricDilatedConvolution:updateOutput(input) - self.finput = self.finput or self.weight.new() - self.fgradInput = self.fgradInput or self.weight.new() - input.THNN.VolumetricDilatedConvolution_updateOutput( - input:cdata(), - self.output:cdata(), - self.weight:cdata(), - THNN.optionalTensor(self.bias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - self.dilationT, self.dilationW, self.dilationH - ) - return self.output -end - -function VolumetricDilatedConvolution:updateGradInput(input, gradOutput) - if self.gradInput then - self.fgradInput = self.fgradInput or self.weight.new() - input.THNN.VolumetricDilatedConvolution_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - self.finput:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - self.dilationT, self.dilationW, self.dilationH - ) - return self.gradInput - end -end - -function VolumetricDilatedConvolution:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - self.fgradInput = self.fgradInput or self.weight.new() - input.THNN.VolumetricDilatedConvolution_accGradParameters( - input:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - THNN.optionalTensor(self.gradBias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - self.dilationT, self.dilationW, self.dilationH, - scale - ) -end - -function VolumetricDilatedConvolution:__tostring__() - local s = string.format('%s(%d -> %d, %dx%dx%d', torch.type(self), - self.nInputPlane, self.nOutputPlane, self.kT, self.kW, self.kH) - if self.dT ~= 1 or self.dW ~= 1 or self.dH ~= 1 - or self.padT ~= 0 or self.padW ~= 0 or self.padH ~= 0 then - s = s .. string.format(', %d,%d,%d', self.dT, self.dW, self.dH) - end - if (self.padT or self.padW or self.padH) - and (self.padT ~= 0 or self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padT .. ',' .. self.padW .. ',' .. self.padH - end - s = s .. ', ' .. self.dilationT .. ',' - .. self.dilationW .. ',' .. self.dilationH - if self.bias then - return s .. ')' - else - return s .. 
') without bias' - end -end diff --git a/contrib/lua-torch/nn/VolumetricDilatedMaxPooling.lua b/contrib/lua-torch/nn/VolumetricDilatedMaxPooling.lua deleted file mode 100644 index 249b2b58ed..0000000000 --- a/contrib/lua-torch/nn/VolumetricDilatedMaxPooling.lua +++ /dev/null @@ -1,71 +0,0 @@ -local THNN = require 'nn.THNN' -local VolumetricDilatedMaxPooling, parent = torch.class('nn.VolumetricDilatedMaxPooling', 'nn.VolumetricMaxPooling') - -function VolumetricDilatedMaxPooling:__init(kT, kW, kH, dT, dW, dH, padT, padW, padH, dilationT, dilationW, dilationH) - parent.__init(self, kT, kW, kH, dT, dW, dH, padT, padW, padH) - - self.dilationT = dilationT or 1 - self.dilationW = dilationW or 1 - self.dilationH = dilationH or 1 - -end - -function VolumetricDilatedMaxPooling:updateOutput(input) - local dims = input:dim() - self.itime = input:size(dims-2) - self.iheight = input:size(dims-1) - self.iwidth = input:size(dims) - - self.indices = self.indices or torch.LongTensor() - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.indices = torch.CudaLongTensor and self.indices:cudaLong() or self.indices - else - self.indices = self.indices:long() - end - input.THNN.VolumetricDilatedMaxPooling_updateOutput( - input:cdata(), - self.output:cdata(), - self.indices:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - self.dilationT, self.dilationW, self.dilationH, - self.ceil_mode - ) - return self.output -end - -function VolumetricDilatedMaxPooling:updateGradInput(input, gradOutput) - input.THNN.VolumetricDilatedMaxPooling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.indices:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - self.dilationT, self.dilationW, self.dilationH, - self.ceil_mode - ) - return self.gradInput -end - -function VolumetricDilatedMaxPooling:clearState() - if self.indices then - self.indices:set() - end - return parent.clearState(self) -end - -function VolumetricDilatedMaxPooling:__tostring__() - local s = string.format('%s(%dx%dx%d, %d,%d,%d', torch.type(self), - self.kT, self.kW, self.kH, self.dT, self.dW, self.dH) - if (self.padT or self.padW or self.padH) and - (self.padT ~= 0 or self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padT.. ',' .. self.padW .. ','.. self.padH - end - s = s .. ', ' .. self.dilationT .. ',' .. self.dilationW .. ',' .. self.dilationH - s = s .. 
')' - - return s -end diff --git a/contrib/lua-torch/nn/VolumetricDropout.lua b/contrib/lua-torch/nn/VolumetricDropout.lua deleted file mode 100644 index 809e28afe4..0000000000 --- a/contrib/lua-torch/nn/VolumetricDropout.lua +++ /dev/null @@ -1,55 +0,0 @@ -local VolumetricDropout, Parent = torch.class('nn.VolumetricDropout', 'nn.Module') - -function VolumetricDropout:__init(p,stochasticInference) - Parent.__init(self) - self.p = p or 0.5 - self.train = true - self.stochastic_inference = stochasticInference or false - self.noise = torch.Tensor() -end - -function VolumetricDropout:updateOutput(input) - self.output:resizeAs(input):copy(input) - if self.train or self.stochastic_inference then - if input:dim() == 5 then - self.noise:resize(input:size(1), input:size(2), 1, 1, 1) - elseif input:dim() == 4 then - self.noise:resize(input:size(1), 1, 1, 1) - else - error('Input must be 5D (nbatch, nfeat, t, h, w) or 4D (nfeat, t, h, w)') - end - self.noise:bernoulli(1-self.p) - -- We expand the random dropouts to the entire feature map because the - -- features are likely correlated across the map and so the dropout - -- should also be correlated. - self.output:cmul(torch.expandAs(self.noise, input)) - else - self.output:mul(1-self.p) - end - return self.output -end - -function VolumetricDropout:updateGradInput(input, gradOutput) - if self.train then - self.gradInput:resizeAs(gradOutput):copy(gradOutput) - self.gradInput:cmul(torch.expandAs(self.noise, input)) -- simply mask the gradients with the noise vector - else - error('backprop only defined while training') - end - return self.gradInput -end - -function VolumetricDropout:setp(p) - self.p = p -end - -function VolumetricDropout:__tostring__() - return string.format('%s(%f)', torch.type(self), self.p) -end - -function VolumetricDropout:clearState() - if self.noise then - self.noise:set() - end - return Parent.clearState(self) -end diff --git a/contrib/lua-torch/nn/VolumetricFractionalMaxPooling.lua b/contrib/lua-torch/nn/VolumetricFractionalMaxPooling.lua deleted file mode 100644 index f5ff58cf06..0000000000 --- a/contrib/lua-torch/nn/VolumetricFractionalMaxPooling.lua +++ /dev/null @@ -1,175 +0,0 @@ -local VolumetricFractionalMaxPooling, parent = - torch.class('nn.VolumetricFractionalMaxPooling', 'nn.Module') - --- Usage: --- nn.VolumetricFractionalMaxPooling(poolSizeT, poolSizeW, poolSizeH, outT, outW, outH) --- the output should be the exact size (outT x outH x outW) --- nn.VolumetricFractionalMaxPooling(poolSizeT, poolSizeW, poolSizeH, ratioT, ratioW, ratioH) --- the output should be the size (floor(inT x ratioT) x floor(inH x ratioH) x floor(inW x ratioW)) --- ratios are numbers between (0, 1) exclusive -function VolumetricFractionalMaxPooling:__init(poolSizeT, poolSizeW, poolSizeH, arg1, arg2, arg3) - parent.__init(self) - assert(poolSizeT >= 2) - assert(poolSizeW >= 2) - assert(poolSizeH >= 2) - - -- Pool size (how wide the pooling for each output unit is) - self.poolSizeT = poolSizeT - self.poolSizeW = poolSizeW - self.poolSizeH = poolSizeH - - -- Random samples are drawn for all - -- batch * plane * (time, height, width; i.e., 3) points. This determines - -- the 3d "pseudorandom" overlapping pooling regions for each - -- (batch element x input plane). A new set of random samples is - -- drawn every updateOutput call, unless we disable it via - -- :fixPoolingRegions(). - self.randomSamples = nil - - -- Flag to disable re-generation of random samples for producing - -- a new pooling. 
For testing purposes - self.newRandomPool = false - - if arg1 >= 1 and arg2 >= 1 and arg3 >= 1 then - -- Desired output size: the input tensor will determine the reduction - -- ratio - self.outT = arg1 - self.outW = arg2 - self.outH = arg3 - else - -- Reduction ratio specified per each input - -- This is the reduction ratio that we use - self.ratioT = arg1 - self.ratioW = arg2 - self.ratioH = arg3 - - -- The reduction ratio must be between 0 and 1 - assert(self.ratioT > 0 and self.ratioT < 1) - assert(self.ratioW > 0 and self.ratioW < 1) - assert(self.ratioH > 0 and self.ratioH < 1) - end -end - -function VolumetricFractionalMaxPooling:getBufferSize_(input) - local batchSize = 0 - local planeSize = 0 - - if input:nDimension() == 4 then - batchSize = 1 - planeSize = input:size(1) - elseif input:nDimension() == 5 then - batchSize = input:size(1) - planeSize = input:size(2) - else - error('input must be dim 4 or 5') - end - - return torch.LongStorage({batchSize, planeSize, 3}) -end - -function VolumetricFractionalMaxPooling:initSampleBuffer_(input) - local sampleBufferSize = self:getBufferSize_(input) - - if self.randomSamples == nil then - self.randomSamples = input.new():resize(sampleBufferSize):uniform() - elseif (self.randomSamples:size(1) ~= sampleBufferSize[1] or - self.randomSamples:size(2) ~= sampleBufferSize[2]) then - self.randomSamples:resize(sampleBufferSize):uniform() - else - if not self.newRandomPool then - -- Create new pooling windows, since this is a subsequent call - self.randomSamples:uniform() - end - end -end - -function VolumetricFractionalMaxPooling:getOutputSizes_(input) - local outT = self.outT - local outW = self.outW - local outH = self.outH - if self.ratioW ~= nil and self.ratioH ~= nil then - if input:nDimension() == 5 then - outT = math.floor(input:size(5) * self.ratioT) - outW = math.floor(input:size(4) * self.ratioW) - outH = math.floor(input:size(3) * self.ratioH) - elseif input:nDimension() == 4 then - outT = math.floor(input:size(4) * self.ratioT) - outW = math.floor(input:size(3) * self.ratioW) - outH = math.floor(input:size(2) * self.ratioH) - else - error('input must be dim 4 or 5') - end - - -- Neither can be smaller than 1 - assert(outT > 0, 'reduction ratio or input time too small') - assert(outW > 0, 'reduction ratio or input width too small') - assert(outH > 0, 'reduction ratio or input height too small') - else - assert(outT ~= nil and outW ~= nil and outH ~= nil) - end - - return outT, outW, outH -end - --- Call this to turn off regeneration of random pooling regions each --- updateOutput call. 
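--[[
 A usage sketch for the two constructor modes documented above (the sizes
 and ratios here are illustrative, not taken from the original file):

 -- exact mode: pool 2x2x2 regions down to an 8x8x8 output volume
 local m1 = nn.VolumetricFractionalMaxPooling(2, 2, 2, 8, 8, 8)
 -- ratio mode: each output dimension is floor(input size * 0.5)
 local m2 = nn.VolumetricFractionalMaxPooling(2, 2, 2, 0.5, 0.5, 0.5)
 -- keep the same pseudorandom pooling regions across updateOutput calls
 m2:fixPoolingRegions()
]]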
-function VolumetricFractionalMaxPooling:fixPoolingRegions(val) - if val == nil then - val = true - end - - self.newRandomPool = val - return self -end - -function VolumetricFractionalMaxPooling:updateOutput(input) - self.indices = self.indices or torch.LongTensor() - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.indices = torch.CudaLongTensor and self.indices:cudaLong() or self.indices - else - self.indices = self.indices:long() - end - self:initSampleBuffer_(input) - local outT, outW, outH = self:getOutputSizes_(input) - - input.THNN.VolumetricFractionalMaxPooling_updateOutput( - input:cdata(), - self.output:cdata(), - outT, outW, outH, self.poolSizeT, self.poolSizeW, self.poolSizeH, - self.indices:cdata(), self.randomSamples:cdata()) - return self.output -end - -function VolumetricFractionalMaxPooling:updateGradInput(input, gradOutput) - assert(self.randomSamples ~= nil, - 'must call updateOutput/forward first') - - local outT, outW, outH = self:getOutputSizes_(input) - - input.THNN.VolumetricFractionalMaxPooling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - outT, outW, outH, self.poolSizeT, self.poolSizeW, self.poolSizeH, - self.indices:cdata()) - return self.gradInput -end - --- backward compat -function VolumetricFractionalMaxPooling:empty() - self:clearState() -end - -function VolumetricFractionalMaxPooling:clearState() - self.indices = nil - self.randomSamples = nil - return parent.clearState(self) -end - -function VolumetricFractionalMaxPooling:__tostring__() - return string.format('%s(%dx%dx%d, %d,%d,%d)', torch.type(self), - self.outT and self.outT or self.ratioT, - self.outW and self.outW or self.ratioW, - self.outH and self.outH or self.ratioH, - self.poolSizeT, self.poolSizeW, self.poolSizeH) -end diff --git a/contrib/lua-torch/nn/VolumetricFullConvolution.lua b/contrib/lua-torch/nn/VolumetricFullConvolution.lua deleted file mode 100644 index 0ce23401ea..0000000000 --- a/contrib/lua-torch/nn/VolumetricFullConvolution.lua +++ /dev/null @@ -1,225 +0,0 @@ -local THNN = require 'nn.THNN' -local VolumetricFullConvolution, parent = torch.class('nn.VolumetricFullConvolution','nn.Module') - -function VolumetricFullConvolution:__init(nInputPlane, nOutputPlane, - kT, kW, kH, -- kernel size - dT, dW, dH, -- stride - padT, padW, padH, -- padding - adjT, adjW, adjH) -- extra output adjustment - parent.__init(self) - - dW = dW or 1 - dH = dH or 1 - dT = dT or 1 - - self.nInputPlane = nInputPlane - self.nOutputPlane = nOutputPlane - self.kW = kW - self.kH = kH - self.kT = kT - self.dW = dW - self.dH = dH - self.dT = dT - self.padW = padW or 0 - self.padH = padH or 0 - self.padT = padT or 0 - self.adjW = adjW or 0 - self.adjH = adjH or 0 - self.adjT = adjT or 0 - - if self.adjW > self.dW - 1 or self.adjH > self.dH - 1 or self.adjT > self.dT - 1 then - error('adjW, adjH and adjT must be smaller than self.dW - 1,' .. 
- ' self.dH - 1 and self.dT - 1 respectively') - end - - self.weight = torch.Tensor(nInputPlane, nOutputPlane, kT, kH, kW) - self.gradWeight = torch.Tensor(nInputPlane, nOutputPlane, kT, kH, kW) - self.bias = torch.Tensor(self.nOutputPlane) - self.gradBias = torch.Tensor(self.nOutputPlane) - - self.ones = torch.Tensor() - self.finput = torch.Tensor() - self.fgradInput = torch.Tensor() - - self:reset() -end - -function VolumetricFullConvolution:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - local nInputPlane = self.nInputPlane - local kT = self.kT - local kH = self.kH - local kW = self.kW - stdv = 1/math.sqrt(kW*kH*kT*nInputPlane) - end - self.weight:uniform(-stdv, stdv) - self.bias:uniform(-stdv, stdv) -end - -local function calculateAdj(targetSize, ker, pad, stride) - return (targetSize + 2 * pad - ker) % stride -end - -function VolumetricFullConvolution:backCompatibility() - -- Transpose the weight when loading from an old version - if not self.adjW then - self.weight = self.weight:transpose(1, 2):contiguous() - end - - -- Rename the padding when loading from an old version - self.padW = self.padW or self.pW - self.padH = self.padH or self.pH - self.padT = self.padT or self.pT - - self.adjW = self.adjW or 0 - self.adjH = self.adjH or 0 - self.adjT = self.adjT or 0 -end - - -function VolumetricFullConvolution:noBias() - self.bias = nil - self.gradBias = nil - return self -end - -function VolumetricFullConvolution:updateOutput(input) - self:backCompatibility() - - local inputTensor = input - local adjT, adjW, adjH = self.adjT, self.adjW, self.adjH - - -- The input can be a table where the second element indicates the target - -- output size, in which case the adj factors are computed automatically - if type(inputTensor) == 'table' then - inputTensor = input[1] - local targetTensor = input[2] - local tDims = targetTensor:dim() - local tT = targetTensor:size(tDims-2) - local tH = targetTensor:size(tDims-1) - local tW = targetTensor:size(tDims) - adjT = calculateAdj(tT, self.kT, self.padT, self.dT) - adjW = calculateAdj(tW, self.kW, self.padW, self.dW) - adjH = calculateAdj(tH, self.kH, self.padH, self.dH) - end - - inputTensor.THNN.VolumetricFullConvolution_updateOutput( - inputTensor:cdata(), - self.output:cdata(), - self.weight:cdata(), - THNN.optionalTensor(self.bias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - adjT, adjW, adjH - ) - - return self.output -end - -function VolumetricFullConvolution:updateGradInput(input, gradOutput) - self:backCompatibility() - - local inputTensor = input - local adjT, adjW, adjH = self.adjT, self.adjW, self.adjH - - -- The input can be a table where the second element indicates the target - -- output size, in which case the adj factors are computed automatically - if type(inputTensor) == 'table' then - inputTensor = input[1] - local targetTensor = input[2] - local tDims = targetTensor:dim() - local tT = targetTensor:size(tDims-2) - local tH = targetTensor:size(tDims-1) - local tW = targetTensor:size(tDims) - adjT = calculateAdj(tT, self.kT, self.padT, self.dT) - adjW = calculateAdj(tW, self.kW, self.padW, self.dW) - adjH = calculateAdj(tH, self.kH, self.padH, self.dH) - -- Momentarily extract the gradInput tensor - if type(self.gradInput) == 'table' then - self.gradInput = self.gradInput[1] - end - end - - inputTensor.THNN.VolumetricFullConvolution_updateGradInput( - inputTensor:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.weight:cdata(), - 
self.finput:cdata(), - self.fgradInput:cdata(), - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - adjT, adjW, adjH - ) - - if type(input) == 'table' then - -- Create a zero tensor to be expanded and used as gradInput[2]. - self.zeroScalar = self.zeroScalar or input[2].new(1):zero() - self.ones:resize(input[2]:dim()):fill(1) - local zeroTensor = self.zeroScalar - :view(table.unpack(self.ones:totable())) - :expandAs(input[2]) - self.gradInput = {self.gradInput, zeroTensor} - end - - return self.gradInput -end - -function VolumetricFullConvolution:accGradParameters(input, gradOutput, scale) - self:backCompatibility() - - local inputTensor = input - local adjT, adjW, adjH = self.adjT, self.adjW, self.adjH - - -- The input can be a table where the second element indicates the target - -- output size, in which case the adj factors are computed automatically - if type(inputTensor) == 'table' then - inputTensor = input[1] - local targetTensor = input[2] - local tDims = targetTensor:dim() - local tT = targetTensor:size(tDims-2) - local tH = targetTensor:size(tDims-1) - local tW = targetTensor:size(tDims) - adjT = calculateAdj(tT, self.kT, self.padT, self.dT) - adjW = calculateAdj(tW, self.kW, self.padW, self.dW) - adjH = calculateAdj(tH, self.kH, self.padH, self.dH) - end - - inputTensor.THNN.VolumetricFullConvolution_accGradParameters( - inputTensor:cdata(), - gradOutput:cdata(), - self.gradWeight:cdata(), - THNN.optionalTensor(self.gradBias), - self.finput:cdata(), - self.fgradInput:cdata(), - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - adjT, adjW, adjH, - scale or 1 - ) -end - -function VolumetricFullConvolution:type(type, tensorCache) - self.finput = torch.Tensor() - self.fgradInput = torch.Tensor() - return parent.type(self, type, tensorCache) -end - -function VolumetricFullConvolution:__tostring__() - local s = string.format('%s(%d -> %d, %dx%dx%d', torch.type(self), - self.nInputPlane, self.nOutputPlane, self.kT, self.kW, self.kH) - if self.dT ~= 1 or self.dW ~= 1 or self.dH ~= 1 or self.padT ~= 0 or self.padW ~= 0 or self.padH ~= 0 then - s = s .. string.format(', %d,%d,%d', self.dT, self.dW, self.dH) - end - if (self.padT or self.padW or self.padH) and (self.padT ~= 0 or self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padT .. ',' .. self.padW .. ',' .. self.padH - end - if (self.adjT or self.adjW or self.adjH) and (self.adjT ~= 0 or self.adjW ~= 0 or self.adjH ~= 0) then - s = s .. ', ' .. self.adjT .. ',' .. self.adjW .. ',' .. self.adjH - end - return s .. 
')' -end diff --git a/contrib/lua-torch/nn/VolumetricMaxPooling.lua b/contrib/lua-torch/nn/VolumetricMaxPooling.lua deleted file mode 100644 index e25c5b31c2..0000000000 --- a/contrib/lua-torch/nn/VolumetricMaxPooling.lua +++ /dev/null @@ -1,102 +0,0 @@ -local VolumetricMaxPooling, parent = torch.class('nn.VolumetricMaxPooling', 'nn.Module') - -VolumetricMaxPooling.__version = 2 - -function VolumetricMaxPooling:__init(kT, kW, kH, dT, dW, dH, padT, padW, padH) - parent.__init(self) - - dT = dT or kT - dW = dW or kW - dH = dH or kH - - self.kT = kT - self.kH = kH - self.kW = kW - self.dT = dT - self.dW = dW - self.dH = dH - - self.padT = padT or 0 - self.padW = padW or 0 - self.padH = padH or 0 - - - self.ceil_mode = false - self.indices = torch.LongTensor() -end - -function VolumetricMaxPooling:ceil() - self.ceil_mode = true - return self -end - -function VolumetricMaxPooling:floor() - self.ceil_mode = false - return self -end - -function VolumetricMaxPooling:updateOutput(input) - local dims = input:dim() - self.itime = input:size(dims-2) - self.iheight = input:size(dims-1) - self.iwidth = input:size(dims) - - self.indices = self.indices or torch.LongTensor() - if torch.typename(input):find('torch%.Cuda.*Tensor') then - self.indices = torch.CudaLongTensor and self.indices:cudaLong() or self.indices - else - self.indices = self.indices:long() - end - input.THNN.VolumetricMaxPooling_updateOutput( - input:cdata(), - self.output:cdata(), - self.indices:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - self.ceil_mode - ) - return self.output -end - -function VolumetricMaxPooling:updateGradInput(input, gradOutput) - input.THNN.VolumetricMaxPooling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.indices:cdata(), - self.kT, self.kW, self.kH, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH, - self.ceil_mode - ) - return self.gradInput -end - -function VolumetricMaxPooling:empty() - self:clearState() -end - -function VolumetricMaxPooling:clearState() - if self.indices then self.indices:set() end - return parent.clearState(self) -end - -function VolumetricMaxPooling:read(file, version) - parent.read(self, file) - if version < 2 then - self.ceil_mode = false - end -end - -function VolumetricMaxPooling:__tostring__() - local s = string.format('%s(%dx%dx%d, %d,%d,%d', torch.type(self), - self.kT, self.kW, self.kH, self.dT, self.dW, self.dH) - if (self.padT or self.padW or self.padH) and - (self.padT ~= 0 or self.padW ~= 0 or self.padH ~= 0) then - s = s .. ', ' .. self.padT.. ',' .. self.padW .. ','.. self.padH - end - s = s .. 
')' - - return s -end diff --git a/contrib/lua-torch/nn/VolumetricMaxUnpooling.lua b/contrib/lua-torch/nn/VolumetricMaxUnpooling.lua deleted file mode 100644 index 6291f5b858..0000000000 --- a/contrib/lua-torch/nn/VolumetricMaxUnpooling.lua +++ /dev/null @@ -1,56 +0,0 @@ -local VolumetricMaxUnpooling, parent = torch.class('nn.VolumetricMaxUnpooling', 'nn.Module') - -function VolumetricMaxUnpooling:__init(poolingModule) - parent.__init(self) - assert(torch.type(poolingModule)=='nn.VolumetricMaxPooling', 'Argument must be a nn.VolumetricMaxPooling module') - assert(poolingModule.kT==poolingModule.dT and poolingModule.kH==poolingModule.dH and poolingModule.kW==poolingModule.dW, "The size of pooling module's kernel must be equal to its stride") - self.pooling = poolingModule -end - -function VolumetricMaxUnpooling:setParams() - self.indices = self.pooling.indices - self.otime = self.pooling.itime - self.oheight = self.pooling.iheight - self.owidth = self.pooling.iwidth - self.dT = self.pooling.dT - self.dH = self.pooling.dH - self.dW = self.pooling.dW - self.padT = self.pooling.padT - self.padH = self.pooling.padH - self.padW = self.pooling.padW -end - -function VolumetricMaxUnpooling:updateOutput(input) - self:setParams() - input.THNN.VolumetricMaxUnpooling_updateOutput( - input:cdata(), - self.output:cdata(), - self.indices:cdata(), - self.otime, self.owidth, self.oheight, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH - ) - return self.output -end - -function VolumetricMaxUnpooling:updateGradInput(input, gradOutput) - self:setParams() - input.THNN.VolumetricMaxUnpooling_updateGradInput( - input:cdata(), - gradOutput:cdata(), - self.gradInput:cdata(), - self.indices:cdata(), - self.otime, self.owidth, self.oheight, - self.dT, self.dW, self.dH, - self.padT, self.padW, self.padH - ) - return self.gradInput -end - -function VolumetricMaxUnpooling:empty() - self:clearState() -end - -function VolumetricMaxUnpooling:__tostring__() - return 'nn.VolumetricMaxUnpooling associated to '..tostring(self.pooling) -end diff --git a/contrib/lua-torch/nn/VolumetricReplicationPadding.lua b/contrib/lua-torch/nn/VolumetricReplicationPadding.lua deleted file mode 100644 index 31a9503fdf..0000000000 --- a/contrib/lua-torch/nn/VolumetricReplicationPadding.lua +++ /dev/null @@ -1,58 +0,0 @@ -local VolumetricReplicationPadding, parent = - torch.class('nn.VolumetricReplicationPadding', 'nn.Module') - -function VolumetricReplicationPadding:__init(pleft, pright, ptop, pbottom, - pfront, pback) - parent.__init(self) - self.pleft = pleft - self.pright = pright or self.pleft - self.ptop = ptop or self.pleft - self.pbottom = pbottom or self.pleft - self.pfront = pfront or self.pleft - self.pback = pback or self.pleft -end - -function VolumetricReplicationPadding:updateOutput(input) - if input:dim() == 4 or input:dim() == 5 then - input.THNN.VolumetricReplicationPadding_updateOutput( - input:cdata(), self.output:cdata(), - self.pleft, self.pright, self.ptop, self.pbottom, self.pfront, - self.pback) - else - error('input must be 4 or 5-dimensional') - end - return self.output -end - -function VolumetricReplicationPadding:updateGradInput(input, gradOutput) - if input:dim() == 4 and gradOutput:dim() == 4 then - assert(input:size(1) == gradOutput:size(1) - and input:size(2) + self.pfront + self.pback == gradOutput:size(2) - and input:size(3) + self.ptop + self.pbottom == gradOutput:size(3) - and input:size(4) + self.pleft + self.pright == gradOutput:size(4), - 'input and gradOutput must be compatible in size') - 
elseif input:dim() == 5 and gradOutput:dim() == 5 then - assert(input:size(1) == gradOutput:size(1) - and input:size(2) == gradOutput:size(2) - and input:size(3) + self.pfront + self.pback == gradOutput:size(3) - and input:size(4) + self.ptop + self.pbottom == gradOutput:size(4) - and input:size(5) + self.pleft + self.pright == gradOutput:size(5), - 'input and gradOutput must be compatible in size') - else - error( - [[input and gradOutput must be 4 or 5-dimensional - and have equal number of dimensions]] - ) - end - input.THNN.VolumetricReplicationPadding_updateGradInput( - input:cdata(), gradOutput:cdata(), self.gradInput:cdata(), - self.pleft, self.pright, self.ptop, self.pbottom, self.pfront, self.pback) - return self.gradInput -end - -function VolumetricReplicationPadding:__tostring__() - return torch.type(self) .. - string.format('(left=%d, right=%d, top=%d, bottom=%d, front=%d, back=%d)', - self.pleft, self.pright, self.ptop, self.pbottom, - self.pfront, self.pback) -end diff --git a/contrib/lua-torch/nn/WeightNorm.lua b/contrib/lua-torch/nn/WeightNorm.lua deleted file mode 100644 index 3ffcd90aa0..0000000000 --- a/contrib/lua-torch/nn/WeightNorm.lua +++ /dev/null @@ -1,208 +0,0 @@ --- Weight Normalization --- https://arxiv.org/pdf/1602.07868v3.pdf -local WeightNorm, parent = torch.class("nn.WeightNorm", "nn.Decorator") - -function WeightNorm:__init(module, outputDim) - -- this container will apply Weight Normalization to any module it wraps - -- it accepts parameter ``outputDim`` that represents the dimension of the output of the weight - -- if outputDim is not 1, the container will transpose the weight - -- if the weight is not 2D, the container will view the weight into a 2D shape - -- that is nOut x (nIn x kw x dw x ...) - - parent.__init(self, module) - assert(module.weight) - - if module.bias then - self.bias = module.bias - self.gradBias = module.gradBias - end - self.gradWeight = module.gradWeight - self.weight = module.weight - - self.outputDim = outputDim or 1 - - -- track the non-output weight dimensions - self.otherDims = 1 - for i = 1, self.weight:dim() do - if i ~= self.outputDim then - self.otherDims = self.otherDims * self.weight:size(i) - end - end - - -- view size for weight norm 2D calculations - self.viewIn = torch.LongStorage({self.weight:size(self.outputDim), self.otherDims}) - - -- view size back to original weight - self.viewOut = self.weight:size() - self.weightSize = self.weight:size() - - -- bubble outputDim size up to the front - for i = self.outputDim - 1, 1, -1 do - self.viewOut[i], self.viewOut[i + 1] = self.viewOut[i + 1], self.viewOut[i] - end - - -- weight is reparametrized to decouple the length from the direction - -- such that w = g * ( v / ||v|| ) - self.v = torch.Tensor(self.viewIn[1], self.viewIn[2]) - self.g = torch.Tensor(self.viewIn[1]) - - self._norm = torch.Tensor(self.viewIn[1]) - self._scale = torch.Tensor(self.viewIn[1]) - - -- gradient of g - self.gradG = torch.Tensor(self.viewIn[1]):zero() - -- gradient of v - self.gradV = torch.Tensor(self.viewIn) - - self:resetInit() -end - -function WeightNorm:permuteIn(inpt) - local ans = inpt - for i = self.outputDim - 1, 1, -1 do - ans = ans:transpose(i, i+1) - end - return ans -end - -function WeightNorm:permuteOut(inpt) - local ans = inpt - for i = 1, self.outputDim - 1 do - ans = ans:transpose(i, i+1) - end - return ans -end - -function WeightNorm:resetInit(inputSize, outputSize) - self.v:normal(0, math.sqrt(2/self.viewIn[2])) - self.g:norm(self.v, 2, 2) - if self.bias then - 
self.bias:zero() - end -end - -function WeightNorm:evaluate() - if not(self.train == false) then - self:updateWeight() - parent.evaluate(self) - end -end - -function WeightNorm:updateWeight() - -- view to 2D when weight norm container operates - self.gradV:copy(self:permuteIn(self.weight)) - self.gradV = self.gradV:view(self.viewIn) - - -- ||w|| - self._norm:norm(self.v, 2, 2):pow(2):add(10e-5):sqrt() - -- g * w / ||w|| - self.gradV:copy(self.v) - self._scale:copy(self.g):cdiv(self._norm) - self.gradV:cmul(self._scale:view(self.viewIn[1], 1) - :expand(self.viewIn[1], self.viewIn[2])) - - -- otherwise maintain size of original module weight - self.gradV = self.gradV:view(self.viewOut) - - self.weight:copy(self:permuteOut(self.gradV)) -end - -function WeightNorm:updateOutput(input) - if not(self.train == false) then - self:updateWeight() - end - self.output:set(self.modules[1]:updateOutput(input)) - return self.output -end - -function WeightNorm:accGradParameters(input, gradOutput, scale) - scale = scale or 1 - self.modules[1]:accGradParameters(input, gradOutput, scale) - - self.weight:copy(self:permuteIn(self.weight)) - self.gradV:copy(self:permuteIn(self.gradWeight)) - self.weight = self.weight:view(self.viewIn) - - local norm = self._norm:view(self.viewIn[1], 1):expand(self.viewIn[1], self.viewIn[2]) - local scale = self._scale:view(self.viewIn[1], 1):expand(self.viewIn[1], self.viewIn[2]) - - -- dL / dw * (w / ||w||) - self.weight:copy(self.gradV) - self.weight:cmul(self.v):cdiv(norm) - self.gradG:sum(self.weight, 2) - - -- dL / dw * g / ||w|| - self.gradV:cmul(scale) - - -- dL / dg * (w * g / ||w||^2) - self.weight:copy(self.v):cmul(scale):cdiv(norm) - self.weight:cmul(self.gradG:view(self.viewIn[1], 1) - :expand(self.viewIn[1], self.viewIn[2])) - - -- dL / dv update - self.gradV:add(-1, self.weight) - - self.gradV = self.gradV:view(self.viewOut) - self.weight = self.weight:view(self.viewOut) - self.gradWeight:copy(self:permuteOut(self.gradV)) -end - -function WeightNorm:updateGradInput(input, gradOutput) - self.gradInput:set(self.modules[1]:updateGradInput(input, gradOutput)) - return self.gradInput -end - -function WeightNorm:zeroGradParameters() - self.modules[1]:zeroGradParameters() - self.gradV:zero() - self.gradG:zero() -end - -function WeightNorm:updateParameters(lr) - self.modules[1]:updateParameters(lr) - self.g:add(-lr, self.gradG) - self.v:add(-lr, self.gradV) -end - -function WeightNorm:parameters() - if self.bias then - return {self.v, self.g, self.bias}, {self.gradV, self.gradG, self.gradBias} - else - return {self.v, self.g}, {self.gradV, self.gradG} - end -end - -function WeightNorm:write(file) - -- Don't save weight and gradWeight since we can easily re-compute it from v - -- and g. 
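--[[
 A minimal usage sketch for this decorator (the wrapped module and sizes
 are illustrative, not taken from the original file): wrapping nn.Linear
 reparametrizes its weight as w = g * (v / ||v||), so the trainable
 parameters become v and g instead of w:

 local lin = nn.WeightNorm(nn.Linear(10, 5)) -- outputDim defaults to 1
 local params, gradParams = lin:parameters() -- {v, g, bias} and their grads
 local y = lin:forward(torch.randn(4, 10)) -- w is rebuilt from v and g
]]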
- local weight = self.modules[1].weight - local gradWeight = self.modules[1].gradWeight - self.weight = nil - self.gradWeight = nil - self.modules[1].weight = nil - self.modules[1].gradWeight = nil - if not self.weightSize then - self.weightSize = weight:size() - end - - parent.write(self, file) - - self.modules[1].weight = weight - self.modules[1].gradWeight = gradWeight - self.weight = weight - self.gradWeight = gradWeight -end - -function WeightNorm:read(file) - parent.read(self, file) - - -- Re-compute weight and gradWeight - if not self.weight then - self.modules[1].weight = self.v.new(self.weightSize) - self.modules[1].gradWeight = self.v.new(self.weightSize) - self.weight = self.modules[1].weight - self.gradWeight = self.modules[1].gradWeight - self:updateWeight() - self.gradWeight:copy(self:permuteOut(self.gradV)) - end -end diff --git a/contrib/lua-torch/nn/WeightedEuclidean.lua b/contrib/lua-torch/nn/WeightedEuclidean.lua deleted file mode 100644 index dbf4158a9a..0000000000 --- a/contrib/lua-torch/nn/WeightedEuclidean.lua +++ /dev/null @@ -1,244 +0,0 @@ -local WeightedEuclidean, parent = torch.class('nn.WeightedEuclidean', 'nn.Module') - -function WeightedEuclidean:__init(inputSize,outputSize) - parent.__init(self) - - self.weight = torch.Tensor(inputSize,outputSize) - self.gradWeight = torch.Tensor(inputSize,outputSize) - - -- each template (output dim) has its own diagonal covariance matrix - self.diagCov = torch.Tensor(inputSize,outputSize) - self.gradDiagCov = torch.Tensor(inputSize,outputSize) - - self:reset() -end - -function WeightedEuclidean:reset(stdv) - if stdv then - stdv = stdv * math.sqrt(3) - else - stdv = 1./math.sqrt(self.weight:size(1)) - end - self.weight:uniform(-stdv, stdv) - self.diagCov:fill(1) -end - -local function view(res, src, ...) 
- local args = {...}
- if src:isContiguous() then
- res:view(src, table.unpack(args))
- else
- res:reshape(src, table.unpack(args))
- end
-end
-
-function WeightedEuclidean:updateOutput(input)
- -- lazy-initialize
- self._diagCov = self._diagCov or self.output.new()
-
- self._input = self._input or input.new()
- self._weight = self._weight or self.weight.new()
- self._expand = self._expand or self.output.new()
- self._expand2 = self._expand2 or self.output.new()
- self._expand3 = self._expand3 or self.output.new()
- self._repeat = self._repeat or self.output.new()
- self._repeat2 = self._repeat2 or self.output.new()
- self._repeat3 = self._repeat3 or self.output.new()
-
- local inputSize, outputSize = self.weight:size(1), self.weight:size(2)
-
- -- y_j = || c_j * (w_j - x) ||
- if input:dim() == 1 then
- view(self._input, input, inputSize, 1)
- self._expand:expandAs(self._input, self.weight)
- self._repeat:resizeAs(self._expand):copy(self._expand)
- self._repeat:add(-1, self.weight)
- self._repeat:cmul(self.diagCov)
- self.output:norm(self._repeat, 2, 1)
- self.output:resize(outputSize)
- elseif input:dim() == 2 then
- local batchSize = input:size(1)
-
- view(self._input, input, batchSize, inputSize, 1)
- self._expand:expand(self._input, batchSize, inputSize, outputSize)
- -- make the expanded tensor contiguous (requires lots of memory)
- self._repeat:resizeAs(self._expand):copy(self._expand)
-
- self._weight:view(self.weight, 1, inputSize, outputSize)
- self._expand2:expandAs(self._weight, self._repeat)
-
- self._diagCov:view(self.diagCov, 1, inputSize, outputSize)
- self._expand3:expandAs(self._diagCov, self._repeat)
- if torch.type(input) == 'torch.CudaTensor' then
- -- requires lots of memory, but minimizes cudaMallocs and loops
- self._repeat2:resizeAs(self._expand2):copy(self._expand2)
- self._repeat:add(-1, self._repeat2)
- self._repeat3:resizeAs(self._expand3):copy(self._expand3)
- self._repeat:cmul(self._repeat3)
- else
- self._repeat:add(-1, self._expand2)
- self._repeat:cmul(self._expand3)
- end
-
- self.output:norm(self._repeat, 2, 2)
- self.output:resize(batchSize, outputSize)
- else
- error"1D or 2D input expected"
- end
- return self.output
-end
-
-function WeightedEuclidean:updateGradInput(input, gradOutput)
- if not self.gradInput then
- return
- end
-
- self._div = self._div or input.new()
- self._output = self._output or self.output.new()
- self._expand4 = self._expand4 or input.new()
- self._gradOutput = self._gradOutput or input.new()
-
- if not self.fastBackward then
- self:updateOutput(input)
- end
-
- local inputSize, outputSize = self.weight:size(1), self.weight:size(2)
-
- --[[
- dy_j   -2 * c_j * c_j * (w_j - x)   c_j * c_j * (x - w_j)
- ---- = -------------------------- = ---------------------
-  dx     2 || c_j * (w_j - x) ||              y_j
- --]]
-
- -- to prevent div by zero (NaN) bugs
- self._output:resizeAs(self.output):copy(self.output):add(0.0000001)
- view(self._gradOutput, gradOutput, gradOutput:size())
- self._div:cdiv(gradOutput, self._output)
- if input:dim() == 1 then
- self._div:resize(1, outputSize)
- self._expand4:expandAs(self._div, self.weight)
-
- if torch.type(input) == 'torch.CudaTensor' then
- self._repeat2:resizeAs(self._expand4):copy(self._expand4)
- self._repeat2:cmul(self._repeat)
- else
- self._repeat2:cmul(self._repeat, self._expand4)
- end
-
- self._repeat2:cmul(self.diagCov)
- self.gradInput:sum(self._repeat2, 2)
- self.gradInput:resizeAs(input)
- elseif input:dim() == 2 then
- local batchSize = input:size(1)
-
- self._div:resize(batchSize, 1,
outputSize) - self._expand4:expand(self._div, batchSize, inputSize, outputSize) - - if torch.type(input) == 'torch.CudaTensor' then - self._repeat2:resizeAs(self._expand4):copy(self._expand4) - self._repeat2:cmul(self._repeat) - self._repeat2:cmul(self._repeat3) - else - self._repeat2:cmul(self._repeat, self._expand4) - self._repeat2:cmul(self._expand3) - end - - self.gradInput:sum(self._repeat2, 3) - self.gradInput:resizeAs(input) - else - error"1D or 2D input expected" - end - - return self.gradInput -end - -function WeightedEuclidean:accGradParameters(input, gradOutput, scale) - local inputSize, outputSize = self.weight:size(1), self.weight:size(2) - scale = scale or 1 - - --[[ - dy_j 2 * c_j * c_j * (w_j - x) c_j * c_j * (w_j - x) - ---- = ------------------------- = --------------------- - dw_j 2 || c_j * (w_j - x) || y_j - - dy_j 2 * c_j * (w_j - x)^2 c_j * (w_j - x)^2 - ---- = ----------------------- = ----------------- - dc_j 2 || c_j * (w_j - x) || y_j - --]] - -- assumes a preceding call to updateGradInput - if input:dim() == 1 then - self.gradWeight:add(-scale, self._repeat2) - - self._repeat:cdiv(self.diagCov) - self._repeat:cmul(self._repeat) - self._repeat:cmul(self.diagCov) - - if torch.type(input) == 'torch.CudaTensor' then - self._repeat2:resizeAs(self._expand4):copy(self._expand4) - self._repeat2:cmul(self._repeat) - else - self._repeat2:cmul(self._repeat, self._expand4) - end - - self.gradDiagCov:add(self._repeat2) - elseif input:dim() == 2 then - self._sum = self._sum or input.new() - self._sum:sum(self._repeat2, 1) - self._sum:resize(inputSize, outputSize) - self.gradWeight:add(-scale, self._sum) - - if torch.type(input) == 'torch.CudaTensor' then - -- requires lots of memory, but minimizes cudaMallocs and loops - self._repeat:cdiv(self._repeat3) - self._repeat:cmul(self._repeat) - self._repeat:cmul(self._repeat3) - self._repeat2:resizeAs(self._expand4):copy(self._expand4) - self._repeat:cmul(self._repeat2) - else - self._repeat:cdiv(self._expand3) - self._repeat:cmul(self._repeat) - self._repeat:cmul(self._expand3) - self._repeat:cmul(self._expand4) - end - - self._sum:sum(self._repeat, 1) - self._sum:resize(inputSize, outputSize) - self.gradDiagCov:add(scale, self._sum) - else - error"1D or 2D input expected" - end -end - -function WeightedEuclidean:type(type, tensorCache) - if type then - -- prevent premature memory allocations - self._input = nil - self._output = nil - self._gradOutput = nil - self._weight = nil - self._div = nil - self._sum = nil - self._expand = nil - self._expand2 = nil - self._expand3 = nil - self._expand4 = nil - self._repeat = nil - self._repeat2 = nil - self._repeat3 = nil - end - return parent.type(self, type, tensorCache) -end - -function WeightedEuclidean:parameters() - return {self.weight, self.diagCov}, {self.gradWeight, self.gradDiagCov} -end - -function WeightedEuclidean:accUpdateGradParameters(input, gradOutput, lr) - local gradWeight = self.gradWeight - local gradDiagCov = self.gradDiagCov - self.gradWeight = self.weight - self.gradDiagCov = self.diagCov - self:accGradParameters(input, gradOutput, -lr) - self.gradWeight = gradWeight - self.gradDiagCov = gradDiagCov -end diff --git a/contrib/lua-torch/nn/WeightedMSECriterion.lua b/contrib/lua-torch/nn/WeightedMSECriterion.lua deleted file mode 100644 index 933472937a..0000000000 --- a/contrib/lua-torch/nn/WeightedMSECriterion.lua +++ /dev/null @@ -1,45 +0,0 @@ -local WeightedMSECriterion, parent = torch.class('nn.WeightedMSECriterion','nn.MSECriterion') - -function 
WeightedMSECriterion:__init(w)
-   parent.__init(self)
-   self.weight = w:clone()
-end
-
-function WeightedMSECriterion:updateOutput(input,target)
-   self.buffer = self.buffer or input.new()
-   self.buffer:resizeAs(input):copy(target)
-   if input:dim() - 1 == self.weight:dim() then
-      for i=1,input:size(1) do
-         self.buffer[i]:cmul(self.weight)
-      end
-   else
-      self.buffer:cmul(self.weight)
-   end
-   self.output_tensor = self.output_tensor or input.new(1)
-   input.THNN.MSECriterion_updateOutput(
-      input:cdata(),
-      self.buffer:cdata(),
-      self.output_tensor:cdata(),
-      self.sizeAverage
-   )
-   self.output = self.output_tensor[1]
-   return self.output
-end
-
-function WeightedMSECriterion:updateGradInput(input, target)
-   self.buffer:resizeAs(input):copy(target)
-   if input:dim() - 1 == self.weight:dim() then
-      for i=1,input:size(1) do
-         self.buffer[i]:cmul(self.weight)
-      end
-   else
-      self.buffer:cmul(self.weight)
-   end
-   input.THNN.MSECriterion_updateGradInput(
-      input:cdata(),
-      self.buffer:cdata(),
-      self.gradInput:cdata(),
-      self.sizeAverage
-   )
-   return self.gradInput
-end
diff --git a/contrib/lua-torch/nn/WhiteNoise.lua b/contrib/lua-torch/nn/WhiteNoise.lua
deleted file mode 100644
index f1defb6463..0000000000
--- a/contrib/lua-torch/nn/WhiteNoise.lua
+++ /dev/null
@@ -1,40 +0,0 @@
-local WhiteNoise, parent = torch.class('nn.WhiteNoise', 'nn.Module')
-
-function WhiteNoise:__init(mean, std)
-   parent.__init(self)
-   self.mean = mean or 0
-   self.std = std or 0.1
-   self.noise = torch.Tensor()
-end
-
-function WhiteNoise:updateOutput(input)
-   self.output:resizeAs(input):copy(input)
-   if self.train ~= false then
-      self.noise:resizeAs(input)
-      self.noise:normal(self.mean, self.std)
-      self.output:add(self.noise)
-   else
-      if self.mean ~= 0 then
-         self.output:add(self.mean)
-      end
-   end
-   return self.output
-end
-
-function WhiteNoise:updateGradInput(input, gradOutput)
-   if self.train ~= false then
-      -- Simply return the gradients.
-      self.gradInput:resizeAs(gradOutput):copy(gradOutput)
-   else
-      error('backprop only defined while training')
-   end
-   return self.gradInput
-end
-
-function WhiteNoise:clearState()
-   self.noise:set()
-end
-
-function WhiteNoise:__tostring__()
-   return string.format('%s mean: %f, std: %f', torch.type(self), self.mean, self.std)
-end
diff --git a/contrib/lua-torch/nn/ZeroGrad.lua b/contrib/lua-torch/nn/ZeroGrad.lua
deleted file mode 100644
index 7c941ce1c6..0000000000
--- a/contrib/lua-torch/nn/ZeroGrad.lua
+++ /dev/null
@@ -1,14 +0,0 @@
-local ZeroGrad, parent = torch.class('nn.ZeroGrad', 'nn.Module')
-
-function ZeroGrad:updateOutput(input)
-   self.output:set(input)
-   return self.output
-end
-
--- the gradient is simply zeroed.
--- useful when you don't want to backpropagate through certain paths.
-function ZeroGrad:updateGradInput(input, gradOutput) - self.gradInput = nn.utils.recursiveResizeAs(self.gradInput, input) - self.gradInput = nn.utils.recursiveFill(self.gradInput, 0) - return self.gradInput -end diff --git a/contrib/lua-torch/nn/ZipTable.lua b/contrib/lua-torch/nn/ZipTable.lua deleted file mode 100644 index 7b18619eb7..0000000000 --- a/contrib/lua-torch/nn/ZipTable.lua +++ /dev/null @@ -1,34 +0,0 @@ -local ZipTable, parent = torch.class('nn.ZipTable', 'nn.Module') - --- input : { {a1,a2}, {b1,b2}, {c1,c2} } --- output : { {a1,b1,c1}, {a2,b2,c2} } -function ZipTable:__init() - parent.__init(self) - self.output = {} - self.gradInput = {} -end - -function ZipTable:updateOutput(inputTable) - self.output = {} - for i,inTable in ipairs(inputTable) do - for j,input in ipairs(inTable) do - local output = self.output[j] or {} - output[i] = input - self.output[j] = output - end - end - return self.output -end - -function ZipTable:updateGradInput(inputTable, gradOutputTable) - self.gradInput = {} - for i,gradOutTable in ipairs(gradOutputTable) do - for j,gradOutput in ipairs(gradOutTable) do - local gradInput = self.gradInput[j] or {} - gradInput[i] = gradOutput - self.gradInput[j] = gradInput - end - end - return self.gradInput -end - diff --git a/contrib/lua-torch/nn/ZipTableOneToMany.lua b/contrib/lua-torch/nn/ZipTableOneToMany.lua deleted file mode 100644 index d4a80fe0dd..0000000000 --- a/contrib/lua-torch/nn/ZipTableOneToMany.lua +++ /dev/null @@ -1,37 +0,0 @@ -local ZipTableOneToMany, parent = torch.class('nn.ZipTableOneToMany', 'nn.Module') - --- based on ZipTable in dpnn - --- input : { v, {a, b, c} } --- output : { {v,a}, {v,b}, {v,c} } -function ZipTableOneToMany:__init() - parent.__init(self) - self.output = {} - self.gradInput = {} - -- make buffer to update during forward/backward - self.gradInputEl = torch.Tensor() -end - -function ZipTableOneToMany:updateOutput(input) - assert(#input == 2, "input must be table of element and table") - local inputEl, inputTable = input[1], input[2] - self.output = {} - for i,v in ipairs(inputTable) do - self.output[i] = {inputEl, v} - end - return self.output -end - -function ZipTableOneToMany:updateGradInput(input, gradOutput) - assert(#input == 2, "input must be table of element and table") - local inputEl, inputTable = input[1], input[2] - self.gradInputEl:resizeAs(inputEl):zero() - local gradInputTable = {} - for i,gradV in ipairs(gradOutput) do - self.gradInputEl:add(gradV[1]) - gradInputTable[i] = gradV[2] - end - self.gradInput = {self.gradInputEl, gradInputTable} - return self.gradInput -end - diff --git a/contrib/lua-torch/nn/hessian.lua b/contrib/lua-torch/nn/hessian.lua deleted file mode 100644 index b841d8c593..0000000000 --- a/contrib/lua-torch/nn/hessian.lua +++ /dev/null @@ -1,391 +0,0 @@ ----------------------------------------------------------------------- --- hessian.lua: this file appends extra methods to modules in nn, --- to estimate diagonal elements of the Hessian. This is useful --- to condition learning rates individually. 
----------------------------------------------------------------------
-nn.hessian = {}
-
-----------------------------------------------------------------------
--- Hessian code is still experimental,
--- and deactivated by default
-----------------------------------------------------------------------
-function nn.hessian.enable()
-
-   local function accDiagHessianParameters(module, input, diagHessianOutput, gw, hw)
-      if #gw ~= #hw then
-         error('Number of gradients is not equal to number of hessians')
-      end
-      module.inputSq = module.inputSq or input.new()
-      module.inputSq:resizeAs(input)
-      torch.cmul(module.inputSq, input, input)
-      -- replace gradients with hessian
-      for i=1,#gw do
-         local gwname = gw[i]
-         local hwname = hw[i]
-         local gwval = module[gwname]
-         local hwval = module[hwname]
-         if hwval == nil then
-            module[hwname] = gwval.new():resizeAs(gwval)
-            hwval = module[hwname]
-         end
-         module[gwname] = hwval
-         module[hwname] = gwval
-      end
-      local oldOutput = module.output
-      module.output = module.output.new():resizeAs(oldOutput)
-      module.forward(module, module.inputSq)
-      module.accGradParameters(module, module.inputSq, diagHessianOutput, 1)
-      -- put back gradients
-      for i=1,#gw do
-         local gwname = gw[i]
-         local hwname = hw[i]
-         local gwval = module[gwname]
-         local hwval = module[hwname]
-         module[gwname] = hwval
-         module[hwname] = gwval
-      end
-      module.output = oldOutput
-   end
-   nn.hessian.accDiagHessianParameters = accDiagHessianParameters
-
-   local function updateDiagHessianInput(module, input, diagHessianOutput, w, wsq)
-      if #w ~= #wsq then
-         error('Number of weights is not equal to number of weights squares')
-      end
-      module.diagHessianInput = module.diagHessianInput or input.new()
-      module.diagHessianInput:resizeAs(input):zero()
-
-      local gi = module.gradInput
-      module.gradInput = module.diagHessianInput
-      for i=1,#w do
-         local wname = w[i]
-         local wsqname = wsq[i]
-         local wval = module[wname]
-         local wsqval = module[wsqname]
-         if wsqval == nil then
-            module[wsqname] = wval.new()
-            wsqval = module[wsqname]
-         end
-         wsqval:resizeAs(wval)
-         torch.cmul(wsqval, wval, wval)
-         module[wsqname] = wval
-         module[wname] = wsqval
-      end
-      module.updateGradInput(module,input,diagHessianOutput)
-      for i=1,#w do
-         local wname = w[i]
-         local wsqname = wsq[i]
-         local wval = module[wname]
-         local wsqval = module[wsqname]
-         module[wname] = wsqval
-         module[wsqname] = wval
-      end
-      module.gradInput = gi
-   end
-   nn.hessian.updateDiagHessianInput = updateDiagHessianInput
-
-   local function updateDiagHessianInputPointWise(module, input, diagHessianOutput)
-      local tdh = diagHessianOutput.new():resizeAs(diagHessianOutput):fill(1)
-      updateDiagHessianInput(module,input,tdh,{},{})
-      module.diagHessianInput:cmul(module.diagHessianInput)
-      module.diagHessianInput:cmul(diagHessianOutput)
-   end
-   nn.hessian.updateDiagHessianInputPointWise = updateDiagHessianInputPointWise
-
-   local function initDiagHessianParameters(module,gw,hw)
-      module.diagHessianInput = module.diagHessianInput or module.gradInput.new();
-      for i=1,#gw do
-         module[hw[i]] = module[hw[i]] or module[gw[i]].new():resizeAs(module[gw[i]])
-      end
-   end
-   nn.hessian.initDiagHessianParameters = initDiagHessianParameters
-
-   ----------------------------------------------------------------------
-   -- Module
-   ----------------------------------------------------------------------
-   function nn.Module.updateDiagHessianInput(self, input, diagHessianOutput)
-      error(torch.typename(self) ..
':updateDiagHessianInput() is undefined') - end - - function nn.Module.accDiagHessianParameters(self, input, diagHessianOutput) - end - - function nn.Module.initDiagHessianParameters() - end - - ---------------------------------------------------------------------- - -- Sequential - ---------------------------------------------------------------------- - function nn.Sequential.initDiagHessianParameters(self) - for i=1,#self.modules do - self.modules[i]:initDiagHessianParameters() - end - end - - function nn.Sequential.updateDiagHessianInput(self, input, diagHessianOutput) - local currentDiagHessianOutput = diagHessianOutput - local currentModule = self.modules[#self.modules] - for i=#self.modules-1,1,-1 do - local previousModule = self.modules[i] - currentDiagHessianOutput = currentModule:updateDiagHessianInput(previousModule.output, currentDiagHessianOutput) - currentModule = previousModule - end - currentDiagHessianOutput = currentModule:updateDiagHessianInput(input, currentDiagHessianOutput) - self.diagHessianInput = currentDiagHessianOutput - return currentDiagHessianOutput - end - - function nn.Sequential.accDiagHessianParameters(self, input, diagHessianOutput) - local currentDiagHessianOutput = diagHessianOutput - local currentModule = self.modules[#self.modules] - for i=#self.modules-1,1,-1 do - local previousModule = self.modules[i] - currentModule:accDiagHessianParameters(previousModule.output, currentDiagHessianOutput) - currentDiagHessianOutput = currentModule.diagHessianInput - currentModule = previousModule - end - currentModule:accDiagHessianParameters(input, currentDiagHessianOutput) - end - - ---------------------------------------------------------------------- - -- Criterion - ---------------------------------------------------------------------- - function nn.Criterion.updateDiagHessianInput(self, input, diagHessianOutput) - error(torch.typename(self) .. 
':updateDiagHessianInput() is undefined') - end - - ---------------------------------------------------------------------- - -- MSECriterion - ---------------------------------------------------------------------- - function nn.MSECriterion.updateDiagHessianInput(self, input, target) - self.diagHessianInput = self.diagHessianInput or input.new() - local val = 2 - if self.sizeAverage then - val = val / input:nElement() - end - self.diagHessianInput:resizeAs(input):fill(val) - return self.diagHessianInput - end - - ---------------------------------------------------------------------- - -- WeightedMSECriterion - ---------------------------------------------------------------------- - function nn.WeightedMSECriterion.updateDiagHessianInput(self,input,target) - return nn.MSECriterion.updateDiagHessianInput(self,input,target) - end - - ---------------------------------------------------------------------- - -- L1Cost - ---------------------------------------------------------------------- - function nn.L1Cost.updateDiagHessianInput(self,input) - self.diagHessianInput = self.diagHessianInput or input.new() - self.diagHessianInput:resizeAs(input) - self.diagHessianInput:fill(1) - self.diagHessianInput[torch.eq(input,0)] = 0 - return self.diagHessianInput - end - - ---------------------------------------------------------------------- - -- Linear - ---------------------------------------------------------------------- - function nn.Linear.updateDiagHessianInput(self, input, diagHessianOutput) - updateDiagHessianInput(self, input, diagHessianOutput, {'weight'}, {'weightSq'}) - return self.diagHessianInput - end - - function nn.Linear.accDiagHessianParameters(self, input, diagHessianOutput) - accDiagHessianParameters(self,input, diagHessianOutput, {'gradWeight','gradBias'}, {'diagHessianWeight','diagHessianBias'}) - end - - function nn.Linear.initDiagHessianParameters(self) - initDiagHessianParameters(self,{'gradWeight','gradBias'},{'diagHessianWeight','diagHessianBias'}) - end - - ---------------------------------------------------------------------- - -- SpatialConvolution - ---------------------------------------------------------------------- - function nn.SpatialConvolution.updateDiagHessianInput(self, input, diagHessianOutput) - updateDiagHessianInput(self, input, diagHessianOutput, {'weight'}, {'weightSq'}) - return self.diagHessianInput - end - - function nn.SpatialConvolution.accDiagHessianParameters(self, input, diagHessianOutput) - accDiagHessianParameters(self,input, diagHessianOutput, {'gradWeight','gradBias'}, {'diagHessianWeight','diagHessianBias'}) - end - - function nn.SpatialConvolution.initDiagHessianParameters(self) - initDiagHessianParameters(self,{'gradWeight','gradBias'},{'diagHessianWeight','diagHessianBias'}) - end - - ---------------------------------------------------------------------- - -- SpatialConvolutionLocal - ---------------------------------------------------------------------- - function nn.SpatialConvolutionLocal.updateDiagHessianInput(self, input, diagHessianOutput) - updateDiagHessianInput(self, input, diagHessianOutput, {'weight'}, {'weightSq'}) - return self.diagHessianInput - end - - function nn.SpatialConvolutionLocal.accDiagHessianParameters(self, input, diagHessianOutput) - accDiagHessianParameters(self,input, diagHessianOutput, {'gradWeight','gradBias'}, {'diagHessianWeight','diagHessianBias'}) - end - - function nn.SpatialConvolutionLocal.initDiagHessianParameters(self) - 
initDiagHessianParameters(self,{'gradWeight','gradBias'},{'diagHessianWeight','diagHessianBias'}) - end - - ---------------------------------------------------------------------- - -- SpatialFullConvolution - ---------------------------------------------------------------------- - function nn.SpatialFullConvolution.updateDiagHessianInput(self, input, diagHessianOutput) - updateDiagHessianInput(self, input, diagHessianOutput, {'weight'}, {'weightSq'}) - return self.diagHessianInput - end - - function nn.SpatialFullConvolution.accDiagHessianParameters(self, input, diagHessianOutput) - accDiagHessianParameters(self,input, diagHessianOutput, {'gradWeight','gradBias'}, {'diagHessianWeight','diagHessianBias'}) - end - - function nn.SpatialFullConvolution.initDiagHessianParameters(self) - initDiagHessianParameters(self,{'gradWeight','gradBias'},{'diagHessianWeight','diagHessianBias'}) - end - - ---------------------------------------------------------------------- - -- SpatialConvolutionMap - ---------------------------------------------------------------------- - function nn.SpatialConvolutionMap.updateDiagHessianInput(self, input, diagHessianOutput) - updateDiagHessianInput(self, input, diagHessianOutput, {'weight','bias'}, {'weightSq','biasSq'}) - return self.diagHessianInput - end - - function nn.SpatialConvolutionMap.accDiagHessianParameters(self, input, diagHessianOutput) - accDiagHessianParameters(self,input, diagHessianOutput, {'gradWeight','gradBias'}, {'diagHessianWeight','diagHessianBias'}) - end - - function nn.SpatialConvolutionMap.initDiagHessianParameters(self) - initDiagHessianParameters(self,{'gradWeight','gradBias'},{'diagHessianWeight','diagHessianBias'}) - end - - ---------------------------------------------------------------------- - -- SpatialFullConvolutionMap - ---------------------------------------------------------------------- - function nn.SpatialFullConvolutionMap.updateDiagHessianInput(self, input, diagHessianOutput) - updateDiagHessianInput(self, input, diagHessianOutput, {'weight'}, {'weightSq'}) - return self.diagHessianInput - end - - function nn.SpatialFullConvolutionMap.accDiagHessianParameters(self, input, diagHessianOutput) - accDiagHessianParameters(self,input, diagHessianOutput, {'gradWeight','gradBias'}, {'diagHessianWeight','diagHessianBias'}) - end - - function nn.SpatialFullConvolutionMap.initDiagHessianParameters(self) - initDiagHessianParameters(self,{'gradWeight','gradBias'},{'diagHessianWeight','diagHessianBias'}) - end - ----------------------------------------------------------------------- - -- Tanh - ---------------------------------------------------------------------- - function nn.Tanh.updateDiagHessianInput(self, input, diagHessianOutput) - updateDiagHessianInputPointWise(self, input, diagHessianOutput) - return self.diagHessianInput - end - - ---------------------------------------------------------------------- - -- TanhShrink - ---------------------------------------------------------------------- - function nn.TanhShrink.updateDiagHessianInput(self, input, diagHessianOutput) - updateDiagHessianInputPointWise(self.tanh, input, diagHessianOutput) - self.diagHessianInput = self.diagHessianInput or input.new():resizeAs(input) - torch.add(self.diagHessianInput, self.tanh.diagHessianInput, diagHessianOutput) - return self.diagHessianInput - end - - ---------------------------------------------------------------------- - -- Square - ---------------------------------------------------------------------- - function 
nn.Square.updateDiagHessianInput(self, input, diagHessianOutput) - updateDiagHessianInputPointWise(self, input, diagHessianOutput) - return self.diagHessianInput - end - - ---------------------------------------------------------------------- - -- Sqrt - ---------------------------------------------------------------------- - function nn.Sqrt.updateDiagHessianInput(self, input, diagHessianOutput) - updateDiagHessianInputPointWise(self, input, diagHessianOutput) - return self.diagHessianInput - end - - ---------------------------------------------------------------------- - -- Reshape - ---------------------------------------------------------------------- - function nn.Reshape.updateDiagHessianInput(self, input, diagHessianOutput) - self.diagHessianInput = self.diagHessianInput or input.new() - diagHessianOutput = diagHessianOutput:contiguous() - self.diagHessianInput:set(diagHessianOutput):resizeAs(input) - return self.diagHessianInput - end - - ---------------------------------------------------------------------- - -- Parameters manipulation: - -- we modify these functions such that they return hessian coefficients - ---------------------------------------------------------------------- - function nn.Module.parameters(self) - if self.weight and self.bias then - return {self.weight, self.bias}, {self.gradWeight, self.gradBias}, {self.diagHessianWeight, self.diagHessianBias} - elseif self.weight then - return {self.weight}, {self.gradWeight}, {self.diagHessianWeight} - elseif self.bias then - return {self.bias}, {self.gradBias}, {self.diagHessianBias} - else - return - end - end - - function nn.Module.getParameters(self) - -- get parameters - local parameters,gradParameters,hessianParameters = self:parameters() - -- flatten parameters and gradients - local flatParameters = nn.Module.flatten(parameters) - collectgarbage() - local flatGradParameters = nn.Module.flatten(gradParameters) - collectgarbage() - local flatHessianParameters - if hessianParameters and hessianParameters[1] then - flatHessianParameters = nn.Module.flatten(hessianParameters) - collectgarbage() - end - - -- return new flat vector that contains all discrete parameters - return flatParameters, flatGradParameters, flatHessianParameters - end - - function nn.Sequential.parameters(self) - local function tinsert(to, from) - if type(from) == 'table' then - for i=1,#from do - tinsert(to,from[i]) - end - else - table.insert(to,from) - end - end - local w = {} - local gw = {} - local ggw = {} - for i=1,#self.modules do - local mw,mgw,mggw = self.modules[i]:parameters() - if mw then - tinsert(w,mw) - tinsert(gw,mgw) - tinsert(ggw,mggw) - end - end - return w,gw,ggw - end - - ---------------------------------------------------------------------- - -- Avoid multiple calls to enable() - ---------------------------------------------------------------------- - function nn.hessian.enable() - end -end diff --git a/contrib/lua-torch/nn/init.lua b/contrib/lua-torch/nn/init.lua deleted file mode 100755 index 4319a88687..0000000000 --- a/contrib/lua-torch/nn/init.lua +++ /dev/null @@ -1,221 +0,0 @@ -require('torch') - -nn = {} -- define the global nn table - -require('nn.THNN') - -require('nn.utils') - - -require('nn.ErrorMessages') -require('nn.Module') - -require('nn.Container') -require('nn.Concat') -require('nn.Parallel') -require('nn.Sequential') -require('nn.DepthConcat') - -require('nn.Decorator') -require('nn.Bottle') -require('nn.WeightNorm') -require('nn.DontCast') -require('nn.NaN') -require('nn.Profile') - -require('nn.Linear') 
-require('nn.LinearWeightNorm') -require('nn.Bilinear') -require('nn.PartialLinear') -require('nn.SparseLinear') -require('nn.IndexLinear') -require('nn.Reshape') -require('nn.View') -require('nn.Contiguous') -require('nn.Select') -require('nn.Narrow') -require('nn.Index') -require('nn.Squeeze') -require('nn.Unsqueeze') -require('nn.Replicate') -require('nn.Transpose') -require('nn.BatchNormalization') -require('nn.LayerNormalization') -require('nn.Padding') -require('nn.GradientReversal') -require('nn.MaskedSelect') - -require('nn.Copy') -require('nn.Min') -require('nn.Max') -require('nn.Sum') -require('nn.Mean') -require('nn.CMul') -require('nn.Mul') -require('nn.MulConstant') -require('nn.CAdd') -require('nn.Add') -require('nn.AddConstant') -require('nn.Constant') -require('nn.Dropout') -require('nn.SpatialDropout') -require('nn.VolumetricDropout') -require('nn.WhiteNoise') -require('nn.OneHot') -require('nn.PrintSize') -require('nn.ZeroGrad') - -require('nn.CAddTable') -require('nn.CDivTable') -require('nn.CMulTable') -require('nn.CSubTable') -require('nn.CMaxTable') -require('nn.CMinTable') -require('nn.CAddTensorTable') - -require('nn.Euclidean') -require('nn.WeightedEuclidean') -require('nn.PairwiseDistance') -require('nn.CosineDistance') -require('nn.DotProduct') -require('nn.Normalize') -require('nn.Cosine') -require('nn.Kmeans') - -require('nn.Exp') -require('nn.Log') -require('nn.HardTanh') -require('nn.Clamp') -require('nn.LogSigmoid') -require('nn.LogSoftMax') -require('nn.Sigmoid') -require('nn.SoftMax') -require('nn.SoftMin') -require('nn.SoftPlus') -require('nn.SoftSign') -require('nn.Tanh') -require('nn.TanhShrink') -require('nn.Abs') -require('nn.Power') -require('nn.Square') -require('nn.Sqrt') -require('nn.HardShrink') -require('nn.SoftShrink') -require('nn.Threshold') -require('nn.Maxout') -require('nn.ReLU') -require('nn.ReLU6') -require('nn.PReLU') -require('nn.CReLU') -require('nn.LeakyReLU') -require('nn.SpatialSoftMax') -require('nn.SpatialLogSoftMax') -require('nn.RReLU') -require('nn.ELU') -require('nn.GatedLinearUnit') - -require('nn.LookupTable') -require('nn.SpatialConvolution') -require('nn.SpatialConvolutionLocal') -require('nn.SpatialFullConvolution') -require('nn.SpatialFullConvolutionMap') -require('nn.SpatialConvolutionMM') -require('nn.SpatialDepthWiseConvolution') -require('nn.SpatialConvolutionMap') -require('nn.SpatialDilatedConvolution') -require('nn.SpatialSubSampling') -require('nn.SpatialMaxPooling') -require('nn.SpatialDilatedMaxPooling') -require('nn.SpatialMaxUnpooling') -require('nn.SpatialFractionalMaxPooling') -require('nn.SpatialLPPooling') -require('nn.SpatialAveragePooling') -require('nn.SpatialAdaptiveMaxPooling') -require('nn.SpatialAdaptiveAveragePooling') -require('nn.TemporalConvolution') -require('nn.TemporalSubSampling') -require('nn.TemporalMaxPooling') -require('nn.TemporalDynamicKMaxPooling') -require('nn.TemporalRowConvolution') -require('nn.SpatialSubtractiveNormalization') -require('nn.SpatialDivisiveNormalization') -require('nn.SpatialContrastiveNormalization') -require('nn.SpatialCrossMapLRN') -require('nn.SpatialZeroPadding') -require('nn.SpatialReflectionPadding') -require('nn.SpatialReplicationPadding') -require('nn.SpatialUpSamplingNearest') -require('nn.SpatialUpSamplingBilinear') -require('nn.SpatialBatchNormalization') - -require('nn.VolumetricConvolution') -require('nn.VolumetricFullConvolution') -require('nn.VolumetricDilatedConvolution') -require('nn.VolumetricMaxPooling') 
-require('nn.VolumetricDilatedMaxPooling') -require('nn.VolumetricFractionalMaxPooling') -require('nn.VolumetricMaxUnpooling') -require('nn.VolumetricAveragePooling') -require('nn.VolumetricBatchNormalization') -require('nn.VolumetricReplicationPadding') - -require('nn.GPU') - -require('nn.ParallelTable') -require('nn.Identity') -require('nn.ConcatTable') -require('nn.SplitTable') -require('nn.JoinTable') -require('nn.SelectTable') -require('nn.MixtureTable') -require('nn.CriterionTable') -require('nn.FlattenTable') -require('nn.NarrowTable') -require('nn.MapTable') -require('nn.ZipTable') -require('nn.ZipTableOneToMany') -require('nn.Collapse') -require('nn.Convert') - -require('nn.Criterion') -require('nn.MSECriterion') -require('nn.SpatialAutoCropMSECriterion') -require('nn.SmoothL1Criterion') -require('nn.MarginCriterion') -require('nn.SoftMarginCriterion') -require('nn.AbsCriterion') -require('nn.ClassNLLCriterion') -require('nn.SpatialClassNLLCriterion') -require('nn.ClassSimplexCriterion') -require('nn.DistKLDivCriterion') -require('nn.MultiCriterion') -require('nn.L1HingeEmbeddingCriterion') -require('nn.HingeEmbeddingCriterion') -require('nn.CosineEmbeddingCriterion') -require('nn.MarginRankingCriterion') -require('nn.MultiMarginCriterion') -require('nn.MultiLabelMarginCriterion') -require('nn.MultiLabelSoftMarginCriterion') -require('nn.L1Cost') -require('nn.L1Penalty') -require('nn.WeightedMSECriterion') -require('nn.BCECriterion') -require('nn.CrossEntropyCriterion') -require('nn.ParallelCriterion') -require('nn.DistanceRatioCriterion') -require('nn.ModuleCriterion') - -require('nn.PixelShuffle') - -require('nn.StochasticGradient') - -require('nn.MM') -require('nn.MV') - -require('nn.Jacobian') -require('nn.SparseJacobian') -require('nn.hessian') -require('nn.test') - - -return nn diff --git a/contrib/lua-torch/nn/lib/CMakeLists.txt b/contrib/lua-torch/nn/lib/CMakeLists.txt deleted file mode 100644 index de04595f6b..0000000000 --- a/contrib/lua-torch/nn/lib/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.6 FATAL_ERROR) -CMAKE_POLICY(VERSION 2.6) -SET(THNN_INSTALL_LIB_SUBDIR "${RSPAMD_LIBDIR}") -SET(THNN_INSTALL_INCLUDE_SUBDIR "${Torch_INSTALL_INCLUDE_SUBDIR}") -ADD_SUBDIRECTORY(THNN) \ No newline at end of file diff --git a/contrib/lua-torch/nn/lib/THNN/CMakeLists.txt b/contrib/lua-torch/nn/lib/THNN/CMakeLists.txt deleted file mode 100644 index 57f9e2e193..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/CMakeLists.txt +++ /dev/null @@ -1,49 +0,0 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.6 FATAL_ERROR) -CMAKE_POLICY(VERSION 2.6) - -IF(NOT TH_LIBRARIES) - SET(TH_LIBRARIES "TH") -ENDIF(NOT TH_LIBRARIES) -MESSAGE(STATUS "TH_LIBRARIES: ${TH_LIBRARIES}") - -IF(NOT THNN_INSTALL_LIB_SUBDIR) - SET(THNN_INSTALL_LIB_SUBDIR "lib" CACHE PATH "THNN install library directory") - SET(THNN_INSTALL_INCLUDE_SUBDIR "include" CACHE PATH "THNN install include subdirectory") -ENDIF() - -# Flags -# When using MSVC -IF(MSVC) - # we want to respect the standard, and we are bored of those **** . 
- ADD_DEFINITIONS(-D_CRT_SECURE_NO_DEPRECATE=1)
- ADD_DEFINITIONS(-DTH_EXPORTS)
-ENDIF(MSVC)
-
-IF (CMAKE_VERSION VERSION_LESS "3.1")
-  SET(CMAKE_C_FLAGS "-std=c99 ${CMAKE_C_FLAGS}")
-ELSE ()
-  SET(CMAKE_C_STANDARD 99)
-ENDIF ()
-
-SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
-
-IF (WITH_OPENMP)
-  FIND_PACKAGE(OpenMP)
-  IF(OPENMP_FOUND)
-    MESSAGE(STATUS "Compiling with OpenMP support")
-    SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
-    SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
-    SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
-  ENDIF(OPENMP_FOUND)
-ENDIF (WITH_OPENMP)
-
-SET(src init.c)
-ADD_LIBRARY(THNN SHARED init.c)
-INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR})
-### Torch packages assume the library prefix is "lib"
-SET_TARGET_PROPERTIES(THNN PROPERTIES
-  PREFIX "lib"
-  IMPORT_PREFIX "lib")
-
-TARGET_LINK_LIBRARIES(THNN ${TH_LIBRARIES})
-INSTALL(TARGETS THNN DESTINATION ${RSPAMD_LIBDIR})
diff --git a/contrib/lua-torch/nn/lib/THNN/README.md b/contrib/lua-torch/nn/lib/THNN/README.md
deleted file mode 100644
index e6c61601d1..0000000000
--- a/contrib/lua-torch/nn/lib/THNN/README.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# THNN
-
-THNN is a library that gathers nn's C implementations of neural network modules. It is entirely free of Lua dependencies and can therefore be used in any application that has a C FFI. Note that it only contains fairly low-level functions; an object-oriented C/C++ wrapper is planned as a separate library.
-
-There is also a CUDA counterpart of THNN (THCUNN) in the [cunn repository](https://github.com/torch/cunn/tree/master/lib/THCUNN).
-
-## Links
-
-* [API reference](doc/api_reference.md)
-* [Style guidelines](doc/style_guidelines.md)
-
-## Motivation
-
-Torch's neural network package (nn) provided many optimized C implementations of modules, but the source files contained Lua-specific code and headers, so they could not easily be compiled and included anywhere else.
-
-THNN is based on the same code, but is written in pure C, so it can easily be included in other code. **Future C implementations should be committed to THNN.**
-
-## API
-
-THNN is a purely functional library. It provides two to three functions for each module, which perform the most important operations:
-
-* **updateOutput** - applies the module to an input
-* **updateGradInput** - accepts the gradient w.r.t. the output and the previous module input, and computes the gradient w.r.t. that input
-* **accGradParameters** - *(optional, only for modules with parameters)* accepts the gradient w.r.t. the output and the previous module input, and computes the gradient w.r.t. the parameters
-
-For information on argument types please check the [API reference](doc/api_reference.md).
-
-## Developer docs
-
-* [Style guidelines](doc/style_guidelines.md)
-
-This section will be expanded when the FFI refactoring is finished.
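For reference, the calling convention described in this README can be sketched in a few lines of plain C. This is a minimal illustration, not code from the removed tree: it assumes a float build, headers installed as `<TH/TH.h>` and `<THNN/THNN.h>`, and linking against libTH/libTHNN. It drives the `Abs` module whose generic implementation is deleted just below; `THNN_(Abs_updateOutput)` instantiates to `THNN_FloatAbs_updateOutput`, and the `THNNState` pointer is unused by the CPU implementations, so `NULL` is acceptable.

```c
#include <stdio.h>
#include <TH/TH.h>      /* assumed install layout; adjust to your include path */
#include <THNN/THNN.h>

int main(void)
{
  /* A 3-element float vector; THNN modules operate on TH tensors. */
  THFloatTensor *input  = THFloatTensor_newWithSize1d(3);
  THFloatTensor *output = THFloatTensor_new();

  THFloatTensor_set1d(input, 0, -1.5f);
  THFloatTensor_set1d(input, 1,  0.0f);
  THFloatTensor_set1d(input, 2,  2.5f);

  /* updateOutput applies the module to the input; the implementation
   * in generic/Abs.c resizes the output tensor itself. */
  THNN_FloatAbs_updateOutput(NULL, input, output);

  for (ptrdiff_t i = 0; i < THFloatTensor_nElement(output); i++)
    printf("%g ", THFloatTensor_get1d(output, i));  /* prints: 1.5 0 2.5 */
  printf("\n");

  THFloatTensor_free(input);
  THFloatTensor_free(output);
  return 0;
}
```

The same updateOutput/updateGradInput pairing applies to every generic/*.c implementation removed by this commit.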
diff --git a/contrib/lua-torch/nn/lib/THNN/THNN.h b/contrib/lua-torch/nn/lib/THNN/THNN.h
deleted file mode 100644
index 0019b7976c..0000000000
--- a/contrib/lua-torch/nn/lib/THNN/THNN.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef THNN_H
-#define THNN_H
-
-#include <stdbool.h>
-#include <TH.h>
-#ifdef _OPENMP
-#include <omp.h>
-#endif
-
-#define THNN_(NAME) TH_CONCAT_3(THNN_, Real, NAME)
-
-#define THIndexTensor THLongTensor
-#define THIndexTensor_(NAME) THLongTensor_ ## NAME
-
-#define THIntegerTensor THIntTensor
-#define THIntegerTensor_(NAME) THIntTensor_ ## NAME
-
-typedef long THIndex_t;
-typedef int THInteger_t;
-typedef void THNNState;
-
-#define THNN_resizeAs_indices(I1, I2) \
-  THLongStorage *size2 = THIndexTensor_(newSizeOf)(I2); \
-  if (!THTensor_(isSize)(I1, size2)) \
-  { \
-    THTensor_(resize)(I1, size2, NULL); \
-  } \
-  THLongStorage_free(size2);
-
-#include "generic/THNN.h"
-#include <THGenerateFloatTypes.h>
-
-#endif
diff --git a/contrib/lua-torch/nn/lib/THNN/generic/Abs.c b/contrib/lua-torch/nn/lib/THNN/generic/Abs.c
deleted file mode 100644
index 28721ec8ea..0000000000
--- a/contrib/lua-torch/nn/lib/THNN/generic/Abs.c
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#define TH_GENERIC_FILE "generic/Abs.c"
-#else
-
-void THNN_(Abs_updateOutput)(
-  THNNState *state,
-  THTensor *input,
-  THTensor *output)
-{
-  THTensor_(resizeAs)(output, input);
-  THTensor_(abs)(output, input);
-}
-
-void THNN_(Abs_updateGradInput)(
-  THNNState *state,
-  THTensor *input,
-  THTensor *gradOutput,
-  THTensor *gradInput)
-{
-  THNN_CHECK_NELEMENT(input, gradOutput);
-  THTensor_(resizeAs)(gradInput, input);
-  TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
-    real z = *input_data;
-    *gradInput_data = *gradOutput_data * (z >= 0 ? 1 : -1);
-  );
-}
-
-#endif
diff --git a/contrib/lua-torch/nn/lib/THNN/generic/AbsCriterion.c b/contrib/lua-torch/nn/lib/THNN/generic/AbsCriterion.c
deleted file mode 100644
index 9bee5de9e4..0000000000
--- a/contrib/lua-torch/nn/lib/THNN/generic/AbsCriterion.c
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#define TH_GENERIC_FILE "generic/AbsCriterion.c"
-#else
-
-void THNN_(AbsCriterion_updateOutput)(
-  THNNState *state,
-  THTensor *input,
-  THTensor *target,
-  THTensor *output,
-  bool sizeAverage)
-{
-  real sum = 0;
-  THNN_CHECK_NELEMENT(input, target);
-  TH_TENSOR_APPLY2(real, input, real, target,
-    sum += fabs(*input_data - *target_data);
-  );
-
-  if (sizeAverage)
-    sum /= THTensor_(nElement)(input);
-
-  THTensor_(set1d)(output, 0, sum);
-}
-
-void THNN_(AbsCriterion_updateGradInput)(
-  THNNState *state,
-  THTensor *input,
-  THTensor *target,
-  THTensor *gradInput,
-  bool sizeAverage)
-{
-  THNN_CHECK_NELEMENT(input, target);
-  real norm = (sizeAverage ? 1./((real)THTensor_(nElement)(input)) : 1.);
-
-  THTensor_(resizeAs)(gradInput, input);
-  TH_TENSOR_APPLY3(real, gradInput, real, input, real, target,
-    *gradInput_data = (*input_data - *target_data) >= 0 ?
norm : -norm;
-  );
-}
-
-#endif
diff --git a/contrib/lua-torch/nn/lib/THNN/generic/BCECriterion.c b/contrib/lua-torch/nn/lib/THNN/generic/BCECriterion.c
deleted file mode 100644
index 637a4067ef..0000000000
--- a/contrib/lua-torch/nn/lib/THNN/generic/BCECriterion.c
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#define TH_GENERIC_FILE "generic/BCECriterion.c"
-#else
-
-#define EPS 1e-12
-
-void THNN_(BCECriterion_updateOutput)(THNNState *state, THTensor *input,
-                                      THTensor *target, THTensor *output,
-                                      bool sizeAverage, THTensor *weights)
-{
-  THNN_CHECK_NELEMENT(input, target);
-  THNN_CHECK_NELEMENT(input, weights);
-  THNN_CHECK_DIM_SIZE(output, 1, 0, 1);
-  real sum = 0;
-
-  if(weights)
-    TH_TENSOR_APPLY3(real, input, real, target, real, weights,
-      real x = *input_data;
-      real y = *target_data;
-      real w = *weights_data;
-      THAssertMsg(x >= 0. && x <= 1.,
-                  "input value should be between 0 and 1, but got %f",
-                  (double) x);
-      sum -= (log(x + EPS) * y + log(1. - x + EPS) * (1. - y)) * w;
-    )
-  else
-    TH_TENSOR_APPLY2(real, input, real, target,
-      real x = *input_data;
-      real y = *target_data;
-      THAssertMsg(x >= 0. && x <= 1.,
-                  "input value should be between 0 and 1, but got %f",
-                  (double) x);
-      sum -= log(x + EPS) * y + log(1. - x + EPS) * (1. - y);
-    );
-
-
-  if (sizeAverage)
-    sum /= THTensor_(nElement)(input);
-
-  THTensor_(set1d)(output, 0, sum);
-}
-
-void THNN_(BCECriterion_updateGradInput)(THNNState *state, THTensor *input,
-                                         THTensor *target, THTensor *gradInput,
-                                         bool sizeAverage, THTensor *weights)
-{
-  THNN_CHECK_NELEMENT(input, target);
-  THNN_CHECK_NELEMENT(input, weights);
-
-  real norm = (sizeAverage ? 1./((real)THTensor_(nElement)(input)) : 1.);
-
-  THTensor_(resizeAs)(gradInput, input);
-
-  TH_TENSOR_APPLY3(real, gradInput, real, input, real, target,
-    real x = *input_data;
-    real y = *target_data;
-    *gradInput_data = - norm * (y - x) / ((1.
- x + EPS) * (x + EPS)); - ); - - if(weights) - THTensor_(cmul)(gradInput, gradInput, weights); -} - -#undef EPS - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/BatchNormalization.c b/contrib/lua-torch/nn/lib/THNN/generic/BatchNormalization.c deleted file mode 100644 index b8f4627909..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/BatchNormalization.c +++ /dev/null @@ -1,149 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/BatchNormalization.c" -#else - -void THNN_(BatchNormalization_updateOutput)( - THNNState *state, THTensor *input, THTensor *output, - THTensor *weight, THTensor *bias, - THTensor *running_mean, THTensor *running_var, - THTensor *save_mean, THTensor *save_std, - bool train, double momentum, double eps) -{ - THTensor_(resizeAs)(output, input); - long nInput = THTensor_(size)(input, 1); - long f; - ptrdiff_t n = THTensor_(nElement)(input) / nInput; - - #pragma omp parallel for - for (f = 0; f < nInput; ++f) { - THTensor *in = THTensor_(newSelect)(input, 1, f); - THTensor *out = THTensor_(newSelect)(output, 1, f); - - real mean, invstd; - - if (train) { - // compute mean per input - accreal sum = 0; - TH_TENSOR_APPLY(real, in, sum += *in_data;); - - mean = (real) sum / n; - THTensor_(set1d)(save_mean, f, (real) mean); - - // compute variance per input - sum = 0; - TH_TENSOR_APPLY(real, in, - sum += (*in_data - mean) * (*in_data - mean);); - - if (sum == 0 && eps == 0.0) { - invstd = 0; - } else { - invstd = (real) (1 / sqrt(sum/n + eps)); - } - THTensor_(set1d)(save_std, f, (real) invstd); - - // update running averages - THTensor_(set1d)(running_mean, f, - (real) (momentum * mean + (1 - momentum) * THTensor_(get1d)(running_mean, f))); - - accreal unbiased_var = sum / (n - 1); - THTensor_(set1d)(running_var, f, - (real) (momentum * unbiased_var + (1 - momentum) * THTensor_(get1d)(running_var, f))); - } else { - mean = THTensor_(get1d)(running_mean, f); - invstd = 1 / sqrt(THTensor_(get1d)(running_var, f) + eps); - } - - // compute output - real w = weight ? THTensor_(get1d)(weight, f) : 1; - real b = bias ? THTensor_(get1d)(bias, f) : 0; - - TH_TENSOR_APPLY2(real, in, real, out, - *out_data = (real) (((*in_data - mean) * invstd) * w + b);); - - THTensor_(free)(out); - THTensor_(free)(in); - } -} - -void THNN_(BatchNormalization_backward)( - THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, - THTensor *gradWeight, THTensor *gradBias, THTensor *weight, - THTensor *running_mean, THTensor *running_var, - THTensor *save_mean, THTensor *save_std, - bool train, double scale, double eps) -{ - THNN_CHECK_SHAPE(input, gradOutput); - long nInput = THTensor_(size)(input, 1); - long f; - ptrdiff_t n = THTensor_(nElement)(input) / nInput; - - #pragma omp parallel for - for (f = 0; f < nInput; ++f) { - THTensor *in = THTensor_(newSelect)(input, 1, f); - THTensor *gradOut = THTensor_(newSelect)(gradOutput, 1, f); - real w = weight ? 
THTensor_(get1d)(weight, f) : 1;
-    real mean, invstd;
-    if (train) {
-      mean = THTensor_(get1d)(save_mean, f);
-      invstd = THTensor_(get1d)(save_std, f);
-    } else {
-      mean = THTensor_(get1d)(running_mean, f);
-      invstd = 1 / sqrt(THTensor_(get1d)(running_var, f) + eps);
-    }
-
-    // sum over all gradOutput in feature plane
-    accreal sum = 0;
-    TH_TENSOR_APPLY(real, gradOut, sum += *gradOut_data;);
-
-    // dot product of Q(X) and gradOutput
-    accreal dotp = 0;
-    TH_TENSOR_APPLY2(real, in, real, gradOut,
-      dotp += (*in_data - mean) * (*gradOut_data););
-
-    if (gradInput) {
-      THTensor_(resizeAs)(gradInput, input);
-      THTensor *gradIn = THTensor_(newSelect)(gradInput, 1, f);
-
-      if (train) {
-        // when in training mode
-        // Q(X) = X - E[x] ; i.e. input centered to zero mean
-        // Y = Q(X) / σ    ; i.e. BN output before weight and bias
-        // dL/dX = (Q(dL/dY) - dot(Y, dL/dY) * Y) / σ * w
-
-        // projection of gradOutput on to output scaled by std
-        real k = (real) dotp * invstd * invstd / n;
-        TH_TENSOR_APPLY2(real, gradIn, real, in,
-          *gradIn_data = (*in_data - mean) * k;);
-
-        accreal gradMean = sum / n;
-        TH_TENSOR_APPLY2(real, gradIn, real, gradOut,
-          *gradIn_data = (*gradOut_data - gradMean - *gradIn_data) * invstd * w;);
-
-      } else {
-        // when in evaluation mode
-        // Q(X) = X - running_mean ; i.e. input centered to zero mean
-        // Y = Q(X) / running_std  ; i.e. BN output before weight and bias
-        // dL/dX = w / running_std
-        TH_TENSOR_APPLY2(real, gradIn, real, gradOut,
-          *gradIn_data = *gradOut_data * invstd * w;);
-      }
-
-      THTensor_(free)(gradIn);
-    }
-
-    if (gradWeight) {
-      real val = THTensor_(get1d)(gradWeight, f);
-      THTensor_(set1d)(gradWeight, f, val + scale * dotp * invstd);
-    }
-
-    if (gradBias) {
-      real val = THTensor_(get1d)(gradBias, f);
-      THTensor_(set1d)(gradBias, f, val + scale * sum);
-    }
-
-    THTensor_(free)(gradOut);
-    THTensor_(free)(in);
-  }
-}
-
-#endif
diff --git a/contrib/lua-torch/nn/lib/THNN/generic/ClassNLLCriterion.c b/contrib/lua-torch/nn/lib/THNN/generic/ClassNLLCriterion.c
deleted file mode 100644
index 4cf37aeaf9..0000000000
--- a/contrib/lua-torch/nn/lib/THNN/generic/ClassNLLCriterion.c
+++ /dev/null
@@ -1,163 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#define TH_GENERIC_FILE "generic/ClassNLLCriterion.c"
-#else
-
-void THNN_(ClassNLLCriterion_updateOutput)(
-  THNNState *state,
-  THTensor *input,
-  THIndexTensor *target,
-  THTensor *output,
-  bool sizeAverage,
-  THTensor *weights,
-  THTensor *total_weight,
-  long ignore_index)
-{
-  THNN_CHECK_DIM_SIZE(output, 1, 0, 1);
-  THNN_CHECK_DIM_SIZE(total_weight, 1, 0, 1);
-  int n_dims = THTensor_(nDimension)(input);
-  int n_classes = THTensor_(size)(input, n_dims - 1);
-  ignore_index -= TH_INDEX_BASE;
-
-  if (THIndexTensor_(nDimension)(target) > 1) {
-    THError("multi-target not supported");
-  }
-  if (THTensor_(nDimension)(input) > 2) {
-    THError("input tensor should be 1D or 2D");
-  }
-  if (weights && THTensor_(nElement)(weights) != n_classes) {
-    THDescBuff s1 = THTensor_(sizeDesc)(weights);
-    THError("weight tensor should be defined either for all %d classes or no classes"
-            " but got weight tensor of shape: %s", n_classes, s1.str);
-  }
-
-  input = THTensor_(newContiguous)(input);
-  target = THIndexTensor_(newContiguous)(target);
-  weights = weights ? THTensor_(newContiguous)(weights) : NULL;
-
-  real *input_data = THTensor_(data)(input);
-  THIndex_t *target_data = THIndexTensor_(data)(target);
-  real *weights_data = weights ?
THTensor_(data)(weights) : NULL; - real *output_data = THTensor_(data)(output); - real *total_weight_data = THTensor_(data)(total_weight); - - output_data[0] = total_weight_data[0] = 0.0; - - if (THTensor_(nDimension)(input) == 1) { - int cur_target = target_data[0] - TH_INDEX_BASE; - if (cur_target != ignore_index) { - THAssert(cur_target >= 0 && cur_target < n_classes); - total_weight_data[0] = weights ? weights_data[cur_target] : 1.0f; - output_data[0] = -input_data[cur_target] * total_weight_data[0]; - } - } else if (THTensor_(nDimension)(input) == 2) { - int batch_size = THTensor_(size)(input, 0); - THAssert(THIndexTensor_(size)(target, 0) == batch_size); - - int n_target = THTensor_(size)(input, 1); - - int i; - for (i = 0; i < batch_size; i++) { - int cur_target = target_data[i] - TH_INDEX_BASE; - if (cur_target != ignore_index) { - THAssert(cur_target >= 0 && cur_target < n_classes); - - real cur_weight = weights ? weights_data[cur_target] : 1.0f; - total_weight_data[0] += cur_weight; - output_data[0] -= input_data[i * n_target + cur_target] * cur_weight; - } - } - } - - if (sizeAverage && total_weight_data[0]) { - output_data[0] /= total_weight_data[0]; - } - - if (weights) { - THTensor_(free)(weights); - } - THTensor_(free)(input); - THIndexTensor_(free)(target); -} - -void THNN_(ClassNLLCriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THIndexTensor *target, - THTensor *gradInput, - bool sizeAverage, - THTensor *weights, - THTensor *total_weight, - long ignore_index) -{ - int n_dims = THTensor_(nDimension)(input); - int n_classes = THTensor_(size)(input, n_dims - 1); - ignore_index -= TH_INDEX_BASE; - - if (!THTensor_(isContiguous)(gradInput)) { - THError("gradInput must be contiguous"); - } - - real *total_weight_data = THTensor_(data)(total_weight); - - if (!(*total_weight_data > 0)) { - return; - } - - if (THIndexTensor_(nDimension)(target) > 1) { - THError("multi-target not supported"); - } - - if (THTensor_(nDimension)(input) > 2) { - THError("input tensor should be 1D or 2D"); - } - - if (weights && THTensor_(nElement)(weights) != n_classes) { - THError("weight tensor should be defined either for all or no classes"); - } - - target = THIndexTensor_(newContiguous)(target); - weights = weights ? THTensor_(newContiguous)(weights) : NULL; - - THIndex_t *target_data = THIndexTensor_(data)(target); - real *weights_data = weights ? THTensor_(data)(weights) : NULL; - real *gradInput_data = THTensor_(data)(gradInput); - - if (THTensor_(nDimension)(input) == 1) { - int cur_target = target_data[0] - TH_INDEX_BASE; - if (cur_target != ignore_index) { - THAssert(cur_target >= 0 && cur_target < n_classes); - - gradInput_data[cur_target] = - (!sizeAverage && weights) ? -weights_data[cur_target] : -1; - } - - } else if (THTensor_(nDimension)(input) == 2) { - int batch_size = THTensor_(size)(input, 0); - THAssert(THIndexTensor_(size)(target, 0) == batch_size); - - int n_target = THTensor_(size)(input, 1); - - int i; - for (i = 0; i < batch_size; i++){ - int cur_target = target_data[i] - TH_INDEX_BASE; - - if (cur_target != ignore_index) { - THAssert(cur_target >= 0 && cur_target < n_classes); - - gradInput_data[i * n_target + cur_target] = - -(weights ? 
weights_data[cur_target] : 1.0f); - - if (sizeAverage && *total_weight_data) { - gradInput_data[i * n_target + cur_target] /= *total_weight_data; - } - } - } - } - - THIndexTensor_(free)(target); - if (weights) { - THTensor_(free)(weights); - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/DistKLDivCriterion.c b/contrib/lua-torch/nn/lib/THNN/generic/DistKLDivCriterion.c deleted file mode 100644 index 6bd6aa0675..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/DistKLDivCriterion.c +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/DistKLDivCriterion.c" -#else - -void THNN_(DistKLDivCriterion_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *output, - bool sizeAverage) -{ - THNN_CHECK_NELEMENT(input, target); - THNN_CHECK_DIM_SIZE(output, 1, 0, 1); - - real sum = 0; - - TH_TENSOR_APPLY2(real, input, real, target, - sum += *target_data > 0 ? *target_data * (log(*target_data) - *input_data) : 0; - ); - - if (sizeAverage) - sum /= THTensor_(nElement)(input); - - THTensor_(set1d)(output, 0, sum); -} - -void THNN_(DistKLDivCriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *gradInput, - bool sizeAverage) -{ - THNN_CHECK_NELEMENT(input, target); - - real norm = (sizeAverage ? 1./((real)THTensor_(nElement)(input)) : 1.); - - THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, input, real, target, - *gradInput_data = *target_data > 0 ? norm * (-*target_data) : 0; - ); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/ELU.c b/contrib/lua-torch/nn/lib/THNN/generic/ELU.c deleted file mode 100644 index ddcfb9705d..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/ELU.c +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/ELU.c" -#else - -void THNN_(ELU_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal alpha_, - bool inplace) -{ - real alpha = TH_CONVERT_ACCREAL_TO_REAL(alpha_); - if(inplace) { - TH_TENSOR_APPLY(real, input, - if(*input_data <= 0) { - *input_data = (exp(*input_data) - 1) * alpha; - } - ); - THTensor_(set)(output, input); - } else { - THTensor_(resizeAs)(output, input); - TH_TENSOR_APPLY2(real, input, real, output, - *output_data = *input_data <= 0 ? (exp(*input_data)-1)*alpha : *input_data; - ); - } -} - -void THNN_(ELU_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output, - accreal alpha_, - bool inplace) -{ - real alpha = TH_CONVERT_ACCREAL_TO_REAL(alpha_); - THNN_CHECK_NELEMENT(input, gradOutput); - if(inplace) { - TH_TENSOR_APPLY2(real, gradOutput, real, output, - if(*output_data <= 0) { - *gradOutput_data *= *output_data + alpha; - } - ); - THTensor_(set)(gradInput, gradOutput); - } else { - THTensor_(resizeAs)(gradInput, output); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, - *gradInput_data = *output_data <= 0 ? 
*gradOutput_data * (*output_data + alpha) : *gradOutput_data; - ); - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/FusedRNNKernel.c b/contrib/lua-torch/nn/lib/THNN/generic/FusedRNNKernel.c deleted file mode 100644 index 30788b0a26..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/FusedRNNKernel.c +++ /dev/null @@ -1,55 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/FusedRNNKernel.c" -#else - -void THNN_(GRUFused_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *hidden, - THTensor *bias1, - THTensor *bias2, - THTensor *hx, - THTensor *hy, - THTensor *storage) -{ - THAssertMsg(false, "Not implemented for CPU"); -} - -void THNN_(GRUFused_updateGradInput)( - THNNState *state, - THTensor *gradInInput, - THTensor *gradInHidden, - THTensor *gradOutput, - THTensor *gradInputHx, - THTensor *storage) -{ - THAssertMsg(false, "Not implemented for CPU"); -} - -void THNN_(LSTMFused_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *hidden, - THTensor *bias1, - THTensor *bias2, - THTensor *cx, - THTensor *hy, - THTensor *cy) -{ - THAssertMsg(false, "Not implemented for CPU"); -} - -void THNN_(LSTMFused_updateGradInput)( - THNNState *state, - THTensor *storage, - THTensor *gradInGates, - THTensor *prevC, - THTensor *cy, - THTensor *gradOutput, - THTensor *gradOutputCell, - THTensor *gradInputCx) -{ - THAssertMsg(false, "Not implemented for CPU"); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/GatedLinearUnit.c b/contrib/lua-torch/nn/lib/THNN/generic/GatedLinearUnit.c deleted file mode 100644 index 274a27e3b0..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/GatedLinearUnit.c +++ /dev/null @@ -1,73 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/GatedLinearUnit.c" -#else - -void THNN_(GatedLinear_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int dim) -{ - // size output to half of input - dim = dim - TH_INDEX_BASE; - const long nIn = THTensor_(size)(input, dim); - THArgCheck(nIn % 2 == 0, 2, "Halving dimension must be even. Dim %d is size %ld", - dim + TH_INDEX_BASE, nIn); - - const long inputSize = THTensor_(size)(input, dim) / 2; - THLongStorage *newSizes = THTensor_(newSizeOf)(input); - THLongStorage_set(newSizes, dim, inputSize); - THTensor_(resize)(output, newSizes, NULL); - - // halve tensor - THTensor *firstHalf = THTensor_(newNarrow)(input, dim, 0, inputSize); - THTensor *secondHalf = THTensor_(newNarrow)(input, dim, inputSize, inputSize); - - // x = x1:cmul( sigmoid(x2) ) - THTensor_(sigmoid)(output, secondHalf); - THTensor_(cmul)(output, output, firstHalf); - - THLongStorage_free(newSizes); - THTensor_(free)(firstHalf); - THTensor_(free)(secondHalf); -} - -void THNN_(GatedLinear_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int dim) -{ - // set up tensors - dim = dim - TH_INDEX_BASE; - const long nIn = THTensor_(size)(input, dim); - THArgCheck(nIn % 2 == 0, 2, "Halving dimension must be even. 
Dim %d is size %ld", - dim + TH_INDEX_BASE, nIn); - - THTensor_(resizeAs)(gradInput, input); - const long inputSize = THTensor_(size)(input, dim) / 2; - THTensor *firstHalf = THTensor_(newNarrow)(input, dim, 0, inputSize); - THTensor *secondHalf = THTensor_(newNarrow)(input, dim, inputSize, inputSize); - THTensor *gradInputfirstHalf = THTensor_(newNarrow)(gradInput, dim, 0, inputSize); - THTensor *gradInputsecondHalf = THTensor_(newNarrow)(gradInput, dim, inputSize, inputSize); - - THTensor_(sigmoid)(gradInputfirstHalf, secondHalf); - - TH_TENSOR_APPLY2(real, gradInputsecondHalf, real, gradInputfirstHalf, - real z = *gradInputfirstHalf_data; - *gradInputsecondHalf_data = (1. - z) * z; - ); - - THTensor_(cmul)(gradInputfirstHalf, gradInputfirstHalf, gradOutput); - - THTensor_(cmul)(gradInputsecondHalf, gradInputsecondHalf, gradOutput); - THTensor_(cmul)(gradInputsecondHalf, gradInputsecondHalf, firstHalf); - - THTensor_(free)(firstHalf); - THTensor_(free)(secondHalf); - THTensor_(free)(gradInputfirstHalf); - THTensor_(free)(gradInputsecondHalf); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/HardShrink.c b/contrib/lua-torch/nn/lib/THNN/generic/HardShrink.c deleted file mode 100644 index aaae85bac8..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/HardShrink.c +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/HardShrink.c" -#else - -void THNN_(HardShrink_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal lambda_) -{ - real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_); - THTensor_(resizeAs)(output, input); - - TH_TENSOR_APPLY2(real, output, real, input, - if (*input_data > lambda) - *output_data = *input_data; - else if (*input_data < -lambda) - *output_data = *input_data; - else - *output_data = 0; - ); -} - -void THNN_(HardShrink_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - accreal lambda_) -{ - real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_); - THNN_CHECK_NELEMENT(input, gradOutput); - THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, - if (*input_data > lambda || *input_data < -lambda) - *gradInput_data = *gradOutput_data; - else - *gradInput_data = 0; - ); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/HardTanh.c b/contrib/lua-torch/nn/lib/THNN/generic/HardTanh.c deleted file mode 100644 index 589a66e150..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/HardTanh.c +++ /dev/null @@ -1,133 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/HardTanh.c" -#else - -void THNN_(HardTanh_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal min_val_, - accreal max_val_, - bool inplace) -{ - real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_); - real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_); - if (inplace) - THTensor_(set)(output, input); - else - THTensor_(resizeAs)(output, input); - - if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output)) - { - if (inplace) - TH_TENSOR_APPLY(real, input, - if (*input_data < min_val) - *input_data = min_val; - else if (*input_data > max_val) - *input_data = max_val; - ); - TH_TENSOR_APPLY2(real, output, real, input, - if (*input_data < min_val) - *output_data = min_val; - else if (*input_data <= max_val) - *output_data = *input_data; - else - *output_data = max_val; - ); - } - else - { - real* ptr_input = THTensor_(data)(input); - 
real* ptr_output = THTensor_(data)(output);
-    ptrdiff_t i;
-    ptrdiff_t n = THTensor_(nElement)(input);
-
-    if (inplace)
-#pragma omp parallel for private(i)
-      for (i = 0; i < n; i++)
-      {
-        if (ptr_input[i] < min_val)
-          ptr_input[i] = min_val;
-        else if (ptr_input[i] > max_val)
-          ptr_input[i] = max_val;
-      }
-    else
-#pragma omp parallel for private(i)
-      for (i = 0; i < n; i++)
-      {
-        if (ptr_input[i] < min_val)
-          ptr_output[i] = min_val;
-        else if (ptr_input[i] <= max_val)
-          ptr_output[i] = ptr_input[i];
-        else
-          ptr_output[i] = max_val;
-      }
-  }
-}
-
-void THNN_(HardTanh_updateGradInput)(
-  THNNState *state,
-  THTensor *input,
-  THTensor *gradOutput,
-  THTensor *gradInput,
-  accreal min_val_,
-  accreal max_val_,
-  bool inplace)
-{
-  real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_);
-  real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_);
-
-  THNN_CHECK_NELEMENT(input, gradOutput);
-  if (inplace)
-    THTensor_(set)(gradInput, gradOutput);
-  else
-    THTensor_(resizeAs)(gradInput, input);
-
-  if (input->nDimension == 1 ||
-    !THTensor_(isContiguous)(input) ||
-    !THTensor_(isContiguous)(gradOutput) ||
-    !THTensor_(isContiguous)(gradInput))
-  {
-    if (inplace)
-    {
-      TH_TENSOR_APPLY2(real, gradOutput, real, input,
-        if (*input_data <= min_val || *input_data >= max_val)
-          *gradOutput_data = 0;
-      );
-    }
-    else
-      TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
-        if (*input_data <= min_val || *input_data >= max_val)
-          *gradInput_data = 0;
-        else
-          *gradInput_data = *gradOutput_data;
-      );
-  }
-  else
-  {
-    real* ptr_gradOutput = THTensor_(data)(gradOutput);
-    real* ptr_gradInput = THTensor_(data)(gradInput);
-    real* ptr_input = THTensor_(data)(input);
-    ptrdiff_t i;
-    ptrdiff_t n = THTensor_(nElement)(input);
-
-    if (inplace)
-#pragma omp parallel for private(i)
-      for (i = 0; i < n; i++)
-      {
-        if (ptr_input[i] <= min_val || ptr_input[i] >= max_val)
-          ptr_gradInput[i] = 0;
-      }
-    else
-#pragma omp parallel for private(i)
-      for (i = 0; i < n; i++)
-      {
-        if (ptr_input[i] <= min_val || ptr_input[i] >= max_val)
-          ptr_gradInput[i] = 0;
-        else
-          ptr_gradInput[i] = ptr_gradOutput[i];
-      }
-  }
-}
-
-#endif
diff --git a/contrib/lua-torch/nn/lib/THNN/generic/IndexLinear.c b/contrib/lua-torch/nn/lib/THNN/generic/IndexLinear.c
deleted file mode 100644
index 42d8368ba4..0000000000
--- a/contrib/lua-torch/nn/lib/THNN/generic/IndexLinear.c
+++ /dev/null
@@ -1,742 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#define TH_GENERIC_FILE "generic/IndexLinear.c"
-#else
-
-#ifdef _OPENMP
-#include <omp.h>
-#endif
-
-/* Threshold used to trigger multithreading */
-#ifndef THNN_SPARSE_OMP_THRESHOLD
-#define THNN_SPARSE_OMP_THRESHOLD 100000
-#endif
-
-/* Threshold used to trigger BLAS axpy call */
-#ifndef THNN_SPARSE_OUTDIM_THRESHOLD
-#define THNN_SPARSE_OUTDIM_THRESHOLD 49
-#endif
-
-/* sign MACRO */
-#ifndef THNN_INDEXLINEAR_SIGN
-#define THNN_INDEXLINEAR_SIGN(a) ( ( (a) < 0 ) ?
-1 : ( (a) > 0 ) ) -#endif - -static bool THNN_(checkKeysValues)(THLongTensor* keys, THTensor* values) -{ - return THLongTensor_size(keys, 0) == THTensor_(nElement)(values) - && THTensor_(nDimension)(values) == 1 - && THLongTensor_nDimension(keys) == 1; -} - -void THNN_(IndexLinear_updateOutput)( - THNNState *state, - THLongTensor *keys, - long keysOffset, - THTensor *values, - THLongTensor *sizes, - THLongTensor *cumSumSizes, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *normalizedValues, - int train) -{ - /* Retrieve all the dimensions of the problem */ - long batchSize = THLongTensor_size(sizes, 0); - long keysSize = THLongTensor_size(keys, 0); - long outDim = THTensor_(size)(bias, 0); - long woutDim = THTensor_(size)(weight, 1); - int maxNormalize = woutDim - outDim; - long* sizesData = THLongTensor_data(sizes); - long* cumSumSizesData = THLongTensor_data(cumSumSizes); - - /* Define/resize the normalized values tensor if maxNormalize is > 0 */ - real* normalizedValuesData = NULL; - if (maxNormalize) - { - THTensor_(resize1d)(normalizedValues, keysSize); - normalizedValuesData = THTensor_(data)(normalizedValues); - } - - /* Resize the output */ - THTensor_(resize2d)(output, batchSize, outDim); - - /* Access the storage data/strides */ - real* outputData = THTensor_(data)(output); - real* valuesData = THTensor_(data)(values); - real* weightData = THTensor_(data)(weight); - long weightStride0 = weight->stride[0]; - real* biasData = THTensor_(data)(bias); - long* keysData = THLongTensor_data(keys); - - /* Make sure these inputs are contiguous to accelerate computations */ - THArgCheck(THLongTensor_isContiguous(keys), 1, "keys vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(values), 3, "values vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(output), 6, "output vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(weight), 7, "weight matrix must be contiguous"); - THArgCheck(THTensor_(isContiguous)(bias), 8, "bias vector must be contiguous"); - THArgCheck(THNN_(checkKeysValues)(keys, values), 1, "Keys and values should have the same number of elements"); - THArgCheck(THTensor_(isContiguous)(normalizedValues), 9, "normalizedValues vector must be contiguous"); - long i,j,k; - - /* Separate cases: output dimension is == 1, or > 1 - * This allows for some optimizations. */ - if (outDim == 1) - { - THVector_(fill)(outputData, *biasData, batchSize); - if (maxNormalize) - { - /* Parallelize on the batch itself */ -#pragma omp parallel \ - for private(i,j) \ - firstprivate(outDim, keysOffset, \ - weightData, keysData, \ - valuesData, outputData, \ - cumSumSizesData, sizesData) \ - schedule(static) \ - if(keysSize*outDim > THNN_SPARSE_OMP_THRESHOLD && batchSize > 1) - for (j = 0; j < batchSize; j++) - { - real* loutputData = outputData + j; - real val = 0; - real absVal = 0; - long offset = j == 0 ? 0 : cumSumSizesData[j - 1]; - - for (i = 0; i < sizesData[j]; i++) - { - long woffset = weightStride0*(keysData[offset] + keysOffset); - absVal = fabs(valuesData[offset]); - if (train) - { - if (absVal > weightData[woffset]) - { - weightData[woffset] = absVal; - weightData[woffset+1] = 1/absVal; - } - - /* - * The following can be used to scale the size of the updates - * depending on some rule, e.g. the frequency of a feature, ... - * This is used at update time. - * TODO: implement a smarter update scale. - */ - weightData[woffset+2] = 1; - } - normalizedValuesData[offset] = (absVal > weightData[woffset] ? 
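/* A standalone sketch (assumed names, not the THNN API) of the
 * max-normalization applied here: a sparse value saturates to sign(v) once
 * |v| exceeds the per-feature running maximum, and is otherwise rescaled by
 * the stored reciprocal of that maximum; the original additionally adds a
 * learned per-feature offset (weightData[woffset+3]), omitted here. */
#include <math.h>
static double max_normalize_sketch(double v, double max_abs, double inv_max_abs)
{
  if (fabs(v) > max_abs)
    return v < 0 ? -1.0 : (v > 0 ? 1.0 : 0.0); /* saturate to sign(v) */
  return v * inv_max_abs;                      /* rescale into [-1, 1] */
}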
THNN_INDEXLINEAR_SIGN(valuesData[offset]):valuesData[offset]*weightData[woffset+1]) + weightData[woffset+3]; - val += normalizedValuesData[offset] * weightData[woffset+maxNormalize]; - offset++; - } - *loutputData += val; - } - } - else - { - /* Parallelize on the batch itself */ -#pragma omp parallel \ - for private(i,j) \ - firstprivate(outDim, weightData, \ - keysData, valuesData, \ - outputData, cumSumSizesData, \ - sizesData) \ - schedule(static) \ - if(keysSize*outDim > THNN_SPARSE_OMP_THRESHOLD && batchSize > 1) - for (j = 0; j < batchSize; j++) - { - long offset = j == 0 ? 0 : cumSumSizesData[j - 1]; - real* loutputData = outputData + j; - real val = 0; - - for (i = 0; i < sizesData[j]; i++) - { - val += weightData[weightStride0*(keysData[offset] + keysOffset)] * valuesData[offset]; - offset++; - } - *loutputData += val; - } - } - } - else { -#pragma omp parallel \ - for private(i,j,k) \ - firstprivate(outDim, weightData, \ - keysData, valuesData, \ - biasData, outputData, \ - cumSumSizesData, sizesData) \ - schedule(static) \ - if(keysSize*outDim > THNN_SPARSE_OMP_THRESHOLD && batchSize > 1) - for (j = 0; j < batchSize; j++) - { - long offset = j == 0 ? 0 : cumSumSizesData[j - 1]; - real val = 0; - real* loutputData = outputData + j*outDim; - real* lweightData = weightData; - memcpy(loutputData, biasData, outDim*sizeof(real)); - for (i = 0; i < sizesData[j]; i++) - { - real val; - long woffset = weightStride0*(keysData[offset] + keysOffset); - if (maxNormalize) - { - val = valuesData[offset]; - real absVal = fabs(val); - if (train) - { - if (absVal > weightData[woffset]) - { - weightData[woffset] = absVal; - weightData[woffset+1] = 1/absVal; - } - - /* - * The following can be used to scale the size of the updates - * depending on some rule, e.g. the frequency of a feature, ... - * The commented section thereafter is just an example of what can be done: - * - *``` - * weightData[woffset+2] = weightData[woffset+2]==0?1:(weightData[woffset+2] / (weightData[woffset+2] + 1)); - * real alpha = 1; - * real beta = 0.01; - * real gamma = 1 - 0.000001; - * real l = weightData[woffset+2]==0?1/gamma:(weightData[woffset+2] - beta) / (alpha - beta); - * l = gamma*l; - * weightData[woffset+2] = (alpha-beta)*l + beta; - * ``` - * - * TODO: implement a smarter update scale. - */ - weightData[woffset+2] = 1; - } - - /* Normalize + Clamp */ - val = (absVal > weightData[woffset] ? 
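/* Sketch of the accumulation performed just below (illustrative helper,
 * assuming plain arrays): each sparse entry adds val times its weight row
 * into the sample's output row, an axpy (y += a*x) over outDim columns;
 * THBlas_(axpy) takes over once outDim crosses THNN_SPARSE_OUTDIM_THRESHOLD. */
static void axpy_sketch(long n, double a, const double *x, double *y)
{
  long k;
  for (k = 0; k < n; k++)
    y[k] += a * x[k];
}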
THNN_INDEXLINEAR_SIGN(val):val*weightData[woffset+1]) + weightData[woffset+3]; - normalizedValuesData[offset] = val; - - lweightData = weightData + woffset + maxNormalize; - } - else - { - val = valuesData[offset]; - lweightData = weightData + woffset; - } - if (outDim > THNN_SPARSE_OUTDIM_THRESHOLD) - { - THBlas_(axpy)(outDim, val, lweightData, 1, loutputData, 1); - } - else - { - for (k=0; k < outDim; k++) - { - loutputData[k] += lweightData[k] * val; - } - } - offset++; - } - } - } - return; -} - -void THNN_(IndexLinear_updateParameters)( - THNNState *state, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *weight, - THTensor *bias, - THLongTensor *runningKeys, - THLongTensor *cumSumSizes, - long keysOffset, - accreal weightDecay_, - accreal learningRate_) -{ - real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_); - real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_); - /* Retrieve all the dimensions of the problem */ - long outDim = THTensor_(size)(bias, 0); - long woutDim = THTensor_(size)(weight, 1); - int maxNormalize = woutDim - outDim; - long keysSize = THLongTensor_size(runningKeys, 0); - - /* Access the storage data/strides */ - real* gradWeightData = THTensor_(data)(gradWeight); - real* weightData = THTensor_(data)(weight); - long weightStride0 = weight->stride[0]; - real* gradBiasData = THTensor_(data)(gradBias); - real* biasData = THTensor_(data)(bias); - long* keysData = THLongTensor_data(runningKeys); - - /* Make sure these inputs are contiguous to accelerate computations */ - THArgCheck(THTensor_(isContiguous)(gradWeight), 1, "gradWeight must be contiguous"); - THArgCheck(THTensor_(isContiguous)(gradBias), 2, "gradBias vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(weight), 3, "weight matrix must be contiguous"); - THArgCheck(THTensor_(isContiguous)(bias), 4, "bias vector must be contiguous"); - THArgCheck(THLongTensor_isContiguous(runningKeys), 5, "keys vector must be contiguous"); - - int j,k; - long offset = 0; - - /* Update the bias first */ - THVector_(cadd)(biasData, biasData, gradBiasData, -learningRate, outDim); - - /* Separate cases: output dimension is == 1, or > 1 - * This allows for some optimizations.
- * No multithreading here as this could - * corrupt the results (hogwild style) */ - if (outDim == 1) - { - if (maxNormalize) - { - if (weightDecay) - { - for (j = 0; j < keysSize; j++) - { - long woffset = weightStride0*(keysData[j] + keysOffset) + maxNormalize; - real lr = learningRate*weightData[woffset-2]; - weightData[woffset-1] -= weightData[woffset]*gradWeightData[2*j]*lr; - weightData[woffset] -= gradWeightData[2*j+1]*lr - weightDecay * weightData[woffset-2] * weightData[woffset]; - } - } - else - { - for (j = 0; j < keysSize; j++) - { - long woffset = weightStride0*(keysData[j] + keysOffset) + maxNormalize; - real lr = learningRate*weightData[woffset-2]; - weightData[woffset-1] -= weightData[woffset]*gradWeightData[2*j]*lr; - weightData[woffset] -= gradWeightData[2*j+1]*lr; - } - } - } - else - { - if (weightDecay) - { - for (j = 0; j < keysSize; j++) - { - long woffset = weightStride0*(keysData[j] + keysOffset); - weightData[woffset] -= gradWeightData[j]*learningRate + weightDecay * weightData[woffset]; - } - } - else - { - for (j = 0; j < keysSize; j++) - { - weightData[weightStride0*(keysData[j] + keysOffset)] -= gradWeightData[j]*learningRate; - } - } - } - } - else - { - for (j = 0; j < keysSize; j++) - { - real lr = learningRate; - real wd = weightDecay; - real* lweightData; - long woffset = weightStride0*(keysData[j] + keysOffset); - real* lgradWeightData = gradWeightData + j*outDim; - if (maxNormalize) - { - lgradWeightData += j*outDim; - /* weightData[woffset + 2] */ - lweightData = weightData + woffset + maxNormalize - 2; - lr = lr*lweightData[0]; - wd = weightDecay*lweightData[0]; - /* weightData[woffset + 3] */ - lweightData++; - for (k=0; k < outDim; k++) - { - lweightData[0] -= lgradWeightData[k]*lweightData[k+1]*lr; - } - lweightData++; - lgradWeightData += outDim; - } - else - { - lweightData = weightData + woffset; - } - - /* We do sparse weight decay. - * We think it makes more sense. 
*/ - if (weightDecay) - { - for (k=0; k < outDim; k++) - { - lweightData[k] -= lweightData[k]*wd; - } - } - - if (outDim > THNN_SPARSE_OUTDIM_THRESHOLD) - { - THBlas_(axpy)(outDim, -lr, lgradWeightData, 1, lweightData, 1); - } - else - { - for (k=0; k < outDim; k++) - { - lweightData[k] -= lgradWeightData[k]*lr; - } - } - } - } -} - - -void THNN_(IndexLinear_accUpdateGradParameters)( - THNNState *state, - THLongTensor *keys, - long keysOffset, - THTensor *values, - THLongTensor *sizes, - THLongTensor *cumSumSizes, - THTensor *gradOutput, - THTensor *weight, - THTensor *bias, - accreal weightDecay_, - accreal scale_) -{ - real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_); - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - /* Retrieve all the dimensions of the problem */ - long batchSize = THLongTensor_size(sizes, 0); - long keysSize = THLongTensor_size(keys, 0); - long outDim = THTensor_(size)(bias, 0); - long woutDim = THTensor_(size)(weight, 1); - int maxNormalize = woutDim - outDim; - THArgCheck(THNN_(checkKeysValues)(keys, values), 1, "Keys and values should have the same number of elements"); - - /* Access the storage data/strides */ - real* gradOutputData = THTensor_(data)(gradOutput); - real* valuesData =THTensor_(data)(values); - real* weightData = THTensor_(data)(weight); - real* biasData = THTensor_(data)(bias); - long weightStride0 = weight->stride[0]; - long biasStride = bias->stride[0]; - long* keysData = THLongTensor_data(keys); - long* sizesData = THLongTensor_data(sizes); - - /* Make sure these inputs are contiguous to accelerate computations */ - THArgCheck(THLongTensor_isContiguous(keys), 1, "keys vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(values), 3, "values vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(gradOutput), 6, "gradOutput vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(weight), 7, "weight matrix must be contiguous"); - THArgCheck(THTensor_(isContiguous)(bias), 8, "bias matrix must be contiguous"); - - int i,j,k; - - /* Separate cases: output dimension is == 1, or > 1 - * This allows for some optimizations. 
- * No multithreading here as this could - * corrupt the results (hogwild style) */ - if (outDim == 1) - { - if (maxNormalize) - { - long offset = 0; - for (j = 0; j < batchSize; j++) - { - real* lgradOutputData = gradOutputData + j; - *biasData -= *lgradOutputData * scale; - real val = *lgradOutputData * scale; - real* lweightData = weightData; - for (i = 0; i < sizesData[j]; i++) - { - long idx = weightStride0*(keysData[offset] + keysOffset) + maxNormalize; - weightData[idx-1] -= weightData[idx]*val*weightData[idx-2]; - weightData[idx] -= (val*valuesData[offset] - weightDecay * weightData[idx])*weightData[idx-2]; - offset++; - } - } - - offset = 0; - for (j = 0; j < batchSize; j++) - { - real* lweightData = weightData; - for (i = 0; i < sizesData[j]; i++) - { - long idx = weightStride0*(keysData[offset] + keysOffset) + maxNormalize; - weightData[idx-2] = 0; - offset++; - } - } - } - else - { - if (weightDecay) - { - long offset = 0; - for (j = 0; j < batchSize; j++) - { - real* lgradOutputData = gradOutputData + j; - *biasData -= *lgradOutputData * scale; - real val = *lgradOutputData * scale; - real* lweightData = weightData; - for (i = 0; i < sizesData[j]; i++) - { - long idx = weightStride0*(keysData[offset] + keysOffset); - weightData[idx] -= val * valuesData[offset] + weightData[idx] * weightDecay; - offset++; - } - } - } - else - { - long offset = 0; - for (j = 0; j < batchSize; j++) - { - real val = gradOutputData[j] * scale; - for (i = 0; i < sizesData[j]; i++) - { - weightData[(keysData[offset] + keysOffset)*weightStride0] -= val * valuesData[offset]; - offset++; - } - *biasData -= val; - } - } - } - } - else { - long offset = 0; - for (j = 0; j < batchSize; j++) - { - real val = 0; - real* lgradOutputData = gradOutputData + j*outDim; - real* lweightData = weightData; - THVector_(cadd)(biasData, biasData, lgradOutputData, -scale, outDim); - for (i = 0; i < sizesData[j]; i++) - { - real val = valuesData[offset] * scale; - real wd = weightDecay; - - // Max normalize case - if (maxNormalize) - { - lweightData = weightData + weightStride0*(keysData[offset] + keysOffset) + (maxNormalize-2); - val *= lweightData[0]; - wd *= lweightData[0]; - for (k=0; k < outDim; k++) - { - lweightData[1] -= lweightData[k+2]*scale*lgradOutputData[k]*lweightData[0]; - } - lweightData += 2; - } - else - { - lweightData = weightData + weightStride0*(keysData[offset] + keysOffset); - } - - /* We do sparse weight decay. - * We think it makes more sense. */ - if (weightDecay) - { - if (outDim > THNN_SPARSE_OUTDIM_THRESHOLD) - { - THBlas_(axpy)(outDim, -wd, lweightData, 1, lweightData, 1); - } - else - { - for (k=0; k < outDim; k++) - { - lweightData[k] -= wd * lweightData[k]; - } - } - } - - if (outDim > THNN_SPARSE_OUTDIM_THRESHOLD) - { - THBlas_(axpy)(outDim, -val, lgradOutputData, 1, lweightData, 1); - } - else - { - for (k=0; k < outDim; k++) - { - lweightData[k] -= val * lgradOutputData[k]; - } - } - offset++; - } - } - - /* Max Normalize case: - * Reset the smart update scaling if - * one does it batch-wise. - * TODO: Decide what to do with that piece of code. 
- * NB: If the code below is uncommented, so should the commented - code in IndexLinear:zeroGradParameters() */ - - /* - if (maxNormalize) - { - offset = 0; - for (j = 0; j < batchSize; j++) - { - real* lweightData = weightData; - for (i = 0; i < sizesData[j]; i++) - { - real val = valuesData[offset] * scale; - real wd = weightDecay; - - lweightData = weightData + weightStride0*(keysData[offset] + keysOffset) + (maxNormalize-2); - lweightData[0] = 0; - offset++; - } - } - } - */ - } - return; -} - -void THNN_(IndexLinear_accGradParameters)( - THNNState *state, - THLongTensor *keys, - long keysOffset, - THTensor *values, - THLongTensor *sizes, - THLongTensor *cumSumSizes, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *weight, - THTensor *bias, - THTensor *valuesBuffer, - accreal weightDecay_, - accreal scale_) -{ - real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_); - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - /* Retrieve all the dimensions of the problem */ - long batchSize = THLongTensor_size(sizes, 0); - long keysSize = THLongTensor_size(keys, 0); - long outDim = THTensor_(size)(bias, 0); - long woutDim = THTensor_(size)(weight, 1); - long maxNormalize = (woutDim - outDim) > 0 ?1:0; - THArgCheck(THNN_(checkKeysValues)(keys, values), 1, "Keys and values should have the same number of elements"); - long* sizesData = THLongTensor_data(sizes); - - /* Compute the cumulative sizes */ - THLongTensor* cumSizes = THLongTensor_new(); - THLongTensor_cumsum(cumSizes, sizes, 0); - long* cumSizesData = THLongTensor_data(cumSizes); - - /* Resize the gradWeight buffer to keep it dense. - * That speeds up updates A LOT assuming random mem access. */ - THTensor_(resize2d)(gradWeight, keysSize, outDim * (maxNormalize>0?2:1)); - - /* Access the storage data/strides */ - real* gradOutputData = THTensor_(data)(gradOutput); - real* valuesData =THTensor_(data)(values); - real* gradWeightData = THTensor_(data)(gradWeight); - real* weightData = THTensor_(data)(weight); - real* gradBiasData = THTensor_(data)(gradBias); - long gradWeightStride0 = gradWeight->stride[0]; - long weightStride0 = weight->stride[0]; - long* keysData = THLongTensor_data(keys); - - /* Make sure these inputs are contiguous to accelerate computations */ - THArgCheck(THLongTensor_isContiguous(keys), 1, "keys vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(values), 3, "values vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(gradOutput), 6, "gradOutput vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(gradWeight), 7, "gradWeight must be contiguous"); - THArgCheck(THTensor_(isContiguous)(gradBias), 8, "gradBias vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(weight), 9, "weight must be contiguous"); - THArgCheck(THTensor_(isContiguous)(bias), 10, "bias vector must be contiguous"); - THArgCheck(THTensor_(isContiguous)(valuesBuffer), 11, "valuesBuffer must be contiguous"); - - int i,j,k; - - /* Separate cases: output dimension is == 1, or > 1 - * This allows for some optimizations.
- * No multithreading here as this could - * corrupt the results (hogwild style) */ - if (outDim == 1) - { - for (j = 0; j < batchSize; j++) - { - long offset = j==0?0:cumSizesData[j-1]; - real val = gradOutputData[j] * scale; - real* lgradWeightData = gradWeightData + offset; - real* lvaluesData = valuesData + offset; - long end = sizesData[j]; - - if (maxNormalize) - { - lgradWeightData += offset; - i = 0; - for(;i < end; i++) - { - lgradWeightData[2*i] = val; - lgradWeightData[2*i+1] = val * lvaluesData[i]; - } - } - else - { - i = 0; - for(;i < end-4; i += 4) - { - lgradWeightData[i] = val * lvaluesData[i]; - lgradWeightData[i+1] = val * lvaluesData[i+1]; - lgradWeightData[i+2] = val * lvaluesData[i+2]; - lgradWeightData[i+3] = val * lvaluesData[i+3]; - } - - for(; i < end; i++) - { - lgradWeightData[i] = val * lvaluesData[i]; - } - } - *gradBiasData += val; - offset += end; - } - } - else { - for (j = 0; j < batchSize; j++) - { - long offset = j==0?0:cumSizesData[j-1]; - real val = 0; - real* lgradOutputData = gradOutputData + j*outDim; - real* lgradWeightData = gradWeightData; - real* lweightData = weightData; - THVector_(cadd)(gradBiasData, gradBiasData, lgradOutputData, scale, outDim); - for (i = 0; i < sizesData[j]; i++) - { - real val = valuesData[offset] * scale; - lgradWeightData = gradWeightData + offset*outDim; - if (maxNormalize) - { - lgradWeightData += offset*outDim; - k = 0; - for(;k < outDim-4; k += 4) - { - lgradWeightData[k] = lgradOutputData[k]*scale; - lgradWeightData[k+1] = lgradOutputData[k+1]*scale; - lgradWeightData[k+2] = lgradOutputData[k+2]*scale; - lgradWeightData[k+3] = lgradOutputData[k+3]*scale; - } - - for(; k < outDim; k++) - { - lgradWeightData[k] = lgradOutputData[k]*scale; - } - lgradWeightData += outDim; - } - k = 0; - for(;k < outDim-4; k += 4) - { - lgradWeightData[k] = val * lgradOutputData[k]; - lgradWeightData[k+1] = val * lgradOutputData[k+1]; - lgradWeightData[k+2] = val * lgradOutputData[k+2]; - lgradWeightData[k+3] = val * lgradOutputData[k+3]; - } - - for(; k < outDim; k++) - { - lgradWeightData[k] = val * lgradOutputData[k]; - } - offset++; - } - } - } - THLongTensor_free(cumSizes); - return; -} -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/L1Cost.c b/contrib/lua-torch/nn/lib/THNN/generic/L1Cost.c deleted file mode 100644 index 53940e8947..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/L1Cost.c +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/L1Cost.c" -#else - -void THNN_(L1Cost_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output) -{ - THNN_CHECK_DIM_SIZE(output, 1, 0, 1); - accreal sum = 0; - - TH_TENSOR_APPLY(real, input, - sum += fabs(*input_data); - ); - - THTensor_(set1d)(output, 0, sum); -} - -void THNN_(L1Cost_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput) -{ - THNN_CHECK_NELEMENT(input, gradOutput); - THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY2(real, gradInput, real, input, - if (*input_data > 0) - *gradInput_data = 1; - else if (*input_data < 0) - *gradInput_data = -1; - else - *gradInput_data = 0; - ); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/LeakyReLU.c b/contrib/lua-torch/nn/lib/THNN/generic/LeakyReLU.c deleted file mode 100644 index 074047d834..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/LeakyReLU.c +++ /dev/null @@ -1,57 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/LeakyReLU.c" -#else - -void 
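/* Minimal sketch (hypothetical helper) of the function defined next:
 * LeakyReLU keeps positive inputs and scales non-positive ones by the
 * negval slope. */
static double leaky_relu_sketch(double x, double negval)
{
  return x > 0 ? x : x * negval;
}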
THNN_(LeakyReLU_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal negval_, - bool inplace) -{ - real negval = TH_CONVERT_ACCREAL_TO_REAL(negval_); - if (inplace) - { - TH_TENSOR_APPLY(real, input, - if (*input_data <= 0) - *input_data *= negval; - ); - THTensor_(set)(output, input); - } - else - { - THTensor_(resizeAs)(output, input); - TH_TENSOR_APPLY2(real, output, real, input, - *output_data = *input_data > 0 ? *input_data : *input_data * negval; - ); - } -} - -void THNN_(LeakyReLU_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - accreal negval_, - bool inplace) -{ - real negval = TH_CONVERT_ACCREAL_TO_REAL(negval_); - THNN_CHECK_NELEMENT(input, gradOutput); - if (inplace) - { - TH_TENSOR_APPLY2(real, gradOutput, real, input, - if (*input_data <= 0) - *gradOutput_data *= negval; - ); - THTensor_(set)(gradInput, gradOutput); - } - else - { - THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, - *gradInput_data = *input_data > 0 ? *gradOutput_data : *gradOutput_data * negval; - ); - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/Linear.c b/contrib/lua-torch/nn/lib/THNN/generic/Linear.c deleted file mode 100644 index 8c5cd115ea..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/Linear.c +++ /dev/null @@ -1,114 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/Linear.c" -#else - -void THNN_(Linear_updateAddBuffer)( - THNNState *state, - THTensor *input, - THTensor *addBuffer) -{ - long nframe = THTensor_(size)(input,0); - long nElement = THTensor_(nElement)(addBuffer); - if (nElement != nframe) { - THTensor_(resize1d)(addBuffer,nframe); - THTensor_(fill)(addBuffer,1.0); - } -} - -void THNN_(Linear_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *addBuffer) -{ - long dim = THTensor_(nDimension)(input); - if (dim == 1) { - THTensor_(resize1d)(output,THTensor_(size)(weight,0)); - if (bias) { - THTensor_(copy)(output,bias); - } - else { - THTensor_(zero)(output); - } - THTensor_(addmv)(output,1,output,1,weight,input); - } - else if (dim == 2) { - long nframe = THTensor_(size)(input,0); - long nElement = THTensor_(nElement)(output); - THTensor_(resize2d)(output,nframe,THTensor_(size)(weight,0)); - if (THTensor_(nElement)(output) != nElement) { - THTensor_(zero)(output); - } - THNN_(Linear_updateAddBuffer)(state,input,addBuffer); - THTensor *tweight = THTensor_(new)(); - THTensor_(transpose)(tweight,weight,0,1); - THTensor_(addmm)(output,0,output,1,input,tweight); - THTensor_(free)(tweight); - if (bias) { - THTensor_(addr)(output,1,output,1,addBuffer,bias); - } - } -} - -void THNN_(Linear_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight) -{ - if (gradInput) { - long nElement = THTensor_(nElement)(gradInput); - THTensor_(resizeAs)(gradInput,input); - if (THTensor_(nElement)(gradInput) != nElement) { - THTensor_(zero)(gradInput); - } - - long dim = THTensor_(nDimension)(input); - if (dim == 1) { - THTensor *tweight = THTensor_(new)(); - THTensor_(transpose)(tweight,weight,0,1); - THTensor_(addmv)(gradInput,0,gradInput,1,tweight,gradOutput); - THTensor_(free)(tweight); - } - else if (dim == 2) { - THTensor_(addmm)(gradInput,0,gradInput,1,gradOutput,weight); - } - } -} - -void THNN_(Linear_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, 
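/* A standalone sketch, with assumed row-major layout, of the 2D case in
 * Linear_updateOutput above: output = input * W^T + bias, spelled out as
 * plain loops instead of THTensor_(addmm)/THTensor_(addr). */
static void linear_forward_sketch(const double *input, const double *W,
                                  const double *bias, double *output,
                                  long nframe, long inDim, long outDim)
{
  long f, o, i;
  for (f = 0; f < nframe; f++)
    for (o = 0; o < outDim; o++)
    {
      double acc = bias ? bias[o] : 0.0;
      for (i = 0; i < inDim; i++)
        acc += input[f*inDim + i] * W[o*inDim + i]; /* W is outDim x inDim */
      output[f*outDim + o] = acc;
    }
}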
- THTensor *gradInput, - THTensor *weight, - THTensor *bias, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *addBuffer, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - long dim = THTensor_(nDimension)(input); - if (dim == 1) { - THTensor_(addr)(gradWeight,1,gradWeight,scale,gradOutput,input); - if (bias) { - THTensor_(cadd)(gradBias,gradBias,scale,gradOutput); - } - } - else if (dim == 2) { - THTensor *tgradOutput = THTensor_(new)(); - THTensor_(transpose)(tgradOutput,gradOutput,0,1); - THTensor_(addmm)(gradWeight,1,gradWeight,scale,tgradOutput,input); - if (bias) { - THNN_(Linear_updateAddBuffer)(state,input,addBuffer); - THTensor_(addmv)(gradBias,1,gradBias,scale,tgradOutput,addBuffer); - } - THTensor_(free)(tgradOutput); - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/LogSigmoid.c b/contrib/lua-torch/nn/lib/THNN/generic/LogSigmoid.c deleted file mode 100644 index 651d560024..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/LogSigmoid.c +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/LogSigmoid.c" -#else - -void THNN_(LogSigmoid_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *buffer) -{ - THTensor_(resizeAs)(output, input); - THTensor_(resizeAs)(buffer, input); - - TH_TENSOR_APPLY3(real, output, real, input, real, buffer, - real z = exp(-*input_data); - *buffer_data = z; - *output_data = -log(1. + z); - ); -} - -void THNN_(LogSigmoid_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *buffer) -{ - THNN_CHECK_NELEMENT(input, gradOutput); - THTensor_(resizeAs)(gradInput, buffer); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, buffer, - real z = *buffer_data; - *gradInput_data = *gradOutput_data * z / (1. 
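/* Sketch of the identity used on this line (illustrative helper): with
 * z = exp(-x) stashed in the buffer by the forward pass,
 * logsigmoid(x) = -log(1 + z) and d/dx logsigmoid(x) = z / (1 + z), so the
 * incoming gradient is simply rescaled by that factor. */
#include <math.h>
static double logsigmoid_backward_sketch(double x, double gradOutput)
{
  double z = exp(-x);
  return gradOutput * z / (1.0 + z);
}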
+ z); - ); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/LogSoftMax.c b/contrib/lua-torch/nn/lib/THNN/generic/LogSoftMax.c deleted file mode 100644 index a7280422b1..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/LogSoftMax.c +++ /dev/null @@ -1,137 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/LogSoftMax.c" -#else - -void THNN_(LogSoftMax_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output) -{ - real *input_data, *output_data; - ptrdiff_t nframe = 0, dim = 0, stride = 0; - ptrdiff_t t, d; - - if (input->nDimension == 1) - { - nframe = 1; - dim = input->size[0]; - stride = 1; - } - else if (input->nDimension == 2) - { - nframe = input->size[0]; - dim = input->size[1]; - stride = 1; - } - else if (input->nDimension == 3) - { - nframe = 1; - dim = input->size[0]; - stride = input->size[1]*input->size[2]; - } - else if (input->nDimension == 4) - { - nframe = input->size[0]; - dim = input->size[1]; - stride = input->size[2]*input->size[3]; - } - else - THArgCheck(0, 2, "1D, 2D, 3D or 4D tensor expected"); - - input = THTensor_(newContiguous)(input); - THTensor_(resizeAs)(output, input); - - real *input_data0 = THTensor_(data)(input); - real *output_data0 = THTensor_(data)(output); - - accreal logsum; - real maxInput; - #pragma omp parallel for private(t, d, maxInput, logsum, input_data, output_data) - for (t = 0; t < stride*nframe; t++) - { - logsum = 0; - maxInput = -THInf; - input_data = input_data0 + (t/stride)*dim*stride + t % stride; - output_data = output_data0 + (t/stride)*dim*stride + t % stride; - - for (d = 0; d < dim; d++) - maxInput = THMax(maxInput, input_data[d*stride]); - - for (d = 0; d < dim; d++) - logsum += exp(input_data[d*stride] - maxInput); - logsum = maxInput + log(logsum); - - for (d = 0; d < dim; d++) - output_data[d*stride] = input_data[d*stride] - logsum; - } - - THTensor_(free)(input); -} - -void THNN_(LogSoftMax_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output) -{ - THNN_CHECK_SHAPE(input, gradOutput); - real *gradInput_data, *gradOutput_data, *output_data; - ptrdiff_t nframe = 0, dim = 0, stride = 0; - ptrdiff_t t, d; - - if (output->nDimension == 1) - { - nframe = 1; - dim = output->size[0]; - stride = 1; - } - else if (output->nDimension == 2) - { - nframe = output->size[0]; - dim = output->size[1]; - stride = 1; - } - else if (output->nDimension == 3) - { - nframe = 1; - dim = output->size[0]; - stride = output->size[1]*output->size[2]; - } - else if (output->nDimension == 4) - { - nframe = output->size[0]; - dim = output->size[1]; - stride = output->size[2]*output->size[3]; - } - else - THError("1D, 2D, 3D or 4D tensor expected"); - - output = THTensor_(newContiguous)(output); - gradOutput = THTensor_(newContiguous)(gradOutput); - - THTensor_(resizeAs)(gradInput, output); - real *gradInput_data0 = THTensor_(data)(gradInput); - real *output_data0 = THTensor_(data)(output); - real *gradOutput_data0 = THTensor_(data)(gradOutput); - accreal sum; - #pragma omp parallel for private(t, sum, d, gradInput_data, output_data, gradOutput_data) - for (t = 0; t < stride*nframe; t++) - { - sum = 0; - gradInput_data = gradInput_data0 + (t/stride)*dim*stride + t % stride; - output_data = output_data0 + (t/stride)*dim*stride + t % stride; - gradOutput_data = gradOutput_data0 + (t/stride)*dim*stride + t % stride; - - for (d = 0; d < dim; d++) - sum += gradOutput_data[d*stride]; - - for (d = 0; d < dim; d++) - gradInput_data[d*stride] = 
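/* Standalone sketch (assumed names) of the backward rule applied here:
 * with y = logsoftmax(x) and incoming gradient g along the softmax
 * dimension, dL/dx_d = g_d - exp(y_d) * sum_k g_k. */
#include <math.h>
static void logsoftmax_backward_sketch(const double *y, const double *g,
                                       double *dx, long dim)
{
  long d;
  double sum = 0;
  for (d = 0; d < dim; d++)
    sum += g[d];
  for (d = 0; d < dim; d++)
    dx[d] = g[d] - exp(y[d]) * sum;
}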
gradOutput_data[d*stride] - exp(output_data[d*stride])*sum; - } - - THTensor_(free)(gradOutput); - THTensor_(free)(output); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/LookupTable.c b/contrib/lua-torch/nn/lib/THNN/generic/LookupTable.c deleted file mode 100644 index 46bc2c3c11..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/LookupTable.c +++ /dev/null @@ -1,225 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/LookupTable.c" -#else - -static void THNN_(LookupTable_resetCount)( - THInteger_t *count_data, - THIndexTensor *input) -{ - ptrdiff_t i; - THIndex_t *input_data = THIndexTensor_(data)(input); - ptrdiff_t numel = THIndexTensor_(nElement)(input); - - for (i = 0; isize[0]); - count_data = THIntegerTensor_(data)(count); - } - - if (!THTensor_(isContiguous)(gradWeight)) - THError("gradWeight must be contiguous"); - if (!THIndexTensor_(isContiguous)(input)) - THError("input must be contiguous"); - if (THIndexTensor_(nDimension)(input) != 1 && THIndexTensor_(nDimension)(input) != 2) { - THDescBuff s1 = THIndexTensor_(sizeDesc)(input); - THError("input must be a vector or matrix, but is of shape: %s", s1.str); - } - - THIndex_t *input_data = THIndexTensor_(data)(input); - ptrdiff_t numel = THIndexTensor_(nElement)(input); - long numw = THTensor_(size)(gradWeight, 0); - - // check that inputs are all within range - for (i=0; i= numw + TH_INDEX_BASE) { - THError("inputs need to be in the range %ld <= input < %ld, " - "but got input of value: %ld", TH_INDEX_BASE, (numw + TH_INDEX_BASE), - input_data[i]); - } - - gradOutput = THTensor_(newContiguous)(gradOutput); - - real *gw = THTensor_(data)(gradWeight); - real *go = THTensor_(data)(gradOutput); - long stride = THTensor_(stride)(gradWeight, 0); - - if (count_data) - THNN_(LookupTable_resetCount)(count_data, input); - -#ifdef _OPENMP - if (numel > 1000) - { - // The strategy is to parallelize over sections of the vocabulary, so that - // thread 1 handles updates to gradWeight[0..nVocab/nThreads]. Every thread - // has to traverse the entire input, but the dominating factor is the axpy - // BLAS call. - #pragma omp parallel private(i) - { - int tid = omp_get_thread_num(); - int nthreads = omp_get_num_threads(); - - long start = tid * (numw/nthreads + 1); - long end = start + (numw/nthreads + 1); - for (i=0; i= start && k < end) - { - real scale_ = scale; - if (count_data) scale_ /= count_data[k]; - THBlas_(axpy)(stride, scale_, go + i*stride, 1, gw + k*stride, 1); - } - } - } - } - - THTensor_(free)(gradOutput); - return; - } -#endif - - for (i=0; i maxNorm) - { - new_norm = maxNorm / (norm + 1e-7); - for (j=0; j= numw + TH_INDEX_BASE) { - THError("input need to be in the range %ld <= input < %ld, " - "but got input of value: %ld", TH_INDEX_BASE, (numw + TH_INDEX_BASE), - row_idx[i]); - } - } - // get unique indices - qsort(row_idx, numel, sizeof(THIndex_t), THNN_(compare_THIndex)); - ptrdiff_t ptr = 0; - for (i=0; i 1000) - { - // The strategy is to parallelize over the rows that appear in - // row_idx, so that thread 1 handles the rows in row_idx[0..numel/nThreads]. - // This distributes the work evenly to each thread. - #pragma omp parallel for private(i) - for (i=0; i0 ? 
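/* Minimal sketch (hypothetical helper) of the hinge term accumulated on
 * this line: the margin criterion sums max(0, margin - x*y) over all
 * elements and optionally averages by the element count. */
static double margin_loss_sketch(const double *x, const double *y, long n,
                                 double margin, int sizeAverage)
{
  long i;
  double sum = 0;
  for (i = 0; i < n; i++)
  {
    double z = margin - x[i] * y[i];
    if (z > 0)
      sum += z;
  }
  return sizeAverage ? sum / (double)n : sum;
}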
z : 0; - ); - - if (sizeAverage) - sum /= THTensor_(nElement)(input); - - THTensor_(set1d)(output, 0, sum); -} - -void THNN_(MarginCriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *gradInput, - bool sizeAverage, - accreal margin_) -{ - real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_); - THNN_CHECK_NELEMENT(input, target); - real norm = (sizeAverage ? 1./((real)THTensor_(nElement)(input)) : 1.); - - THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, input, real, target, - *gradInput_data = (*input_data * *target_data) < margin ? -norm * *target_data : 0; - ); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/MultiLabelMarginCriterion.c b/contrib/lua-torch/nn/lib/THNN/generic/MultiLabelMarginCriterion.c deleted file mode 100644 index 16398c13c5..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/MultiLabelMarginCriterion.c +++ /dev/null @@ -1,184 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/MultiLabelMarginCriterion.c" -#else - -// TODO: improve error messages -void THNN_(MultiLabelMarginCriterion_updateOutput)( - THNNState *state, - THTensor *input, - THIndexTensor *target, - THTensor *output, - THTensor *isTarget, - bool sizeAverage) -{ - real *input_data, *isTarget_data; - THIndex_t *target_data; - long nframe, dim; - long t, d, dt, ddt; - real sum; - - THArgCheck((input->nDimension == 1) || (input->nDimension == 2), 2, - "vector or matrix expected"); - - if (input->nDimension == 1) - { - nframe = 1; - dim = input->size[0]; - THArgCheck((target->nDimension == 1) && (target->size[0] == dim), 3, - "inconsistent target size"); - } - else - { - nframe = input->size[0]; - dim = input->size[1]; - THArgCheck((target->nDimension == 2) && (target->size[0] == nframe) - && (target->size[1] == dim), 3, "inconsistent target size"); - } - - THArgCheck(THIndexTensor_(minall)(target) >= -1+TH_INDEX_BASE, 3, "target out of range"); - THArgCheck(THIndexTensor_(maxall)(target) < dim+TH_INDEX_BASE, 3, "target out of range"); - - target = THIndexTensor_(newContiguous)(target); - input = THTensor_(newContiguous)(input); - input_data = THTensor_(data)(input); - target_data = THIndexTensor_(data)(target); - - THNN_resizeAs_indices(isTarget, target); - THTensor_(zero)(isTarget); - isTarget_data = THTensor_(data)(isTarget); - - sum = 0; - for (t = 0; t < nframe; t++) - { - for (ddt = 0; ddt < dim; ddt++) - { - THIndex_t target_idx = target_data[ddt] - TH_INDEX_BASE; - if (target_idx < 0) - break; - isTarget_data[target_idx] = 1; - } - for (dt = 0; dt < dim; dt++) - { - THIndex_t target_idx = target_data[dt] - TH_INDEX_BASE; - real input_target; - if (target_idx < 0) - break; - - input_target = input_data[target_idx]; - for (d = 0; d < dim; d++) - { - if (!isTarget_data[d]) - { - real z = 1 - input_target + input_data[d]; - if (z > 0) - sum += z; - } - } - } - input_data += dim; - target_data += dim; - isTarget_data += dim; - } - - sum /= dim; - if (sizeAverage) - sum /= nframe; - - THTensor_(set1d)(output, 0, sum); - - THTensor_(free)(input); - THIndexTensor_(free)(target); -} - -void THNN_(MultiLabelMarginCriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THIndexTensor *target, - THTensor *gradInput, - THTensor *isTarget, - bool sizeAverage) -{ - real *input_data; - real *gradInput_data; - THIndex_t *target_data; - real *isTarget_data; - long nframe, dim; - long t, d, dt; - real g; - - THArgCheck((input->nDimension == 1) || (input->nDimension == 2), 2, - "vector or matrix 
expected"); - - if (input->nDimension == 1) - { - nframe = 1; - dim = input->size[0]; - THArgCheck((target->nDimension == 1) && (target->size[0] == dim), 3, - "inconsistent target size"); - THArgCheck((isTarget->nDimension == 1) && (isTarget->size[0] == dim), 3, - "inconsistent isTarget size"); - } - else - { - nframe = input->size[0]; - dim = input->size[1]; - THArgCheck((target->nDimension == 2) && (target->size[0] == nframe) - && (target->size[1] == dim), 3, "inconsistent target size"); - THArgCheck((isTarget->nDimension == 2) && (isTarget->size[0] == nframe) - && (isTarget->size[1] == dim), 3, "inconsistent isTarget size"); - } - - THArgCheck(THIndexTensor_(minall)(target) >= -1+TH_INDEX_BASE, 3, "target out of range"); - THArgCheck(THIndexTensor_(maxall)(target) < dim+TH_INDEX_BASE, 3, "target out of range"); - - THArgCheck(THTensor_(minall)(isTarget) >= 0, 3, "isTarget out of range"); - THArgCheck(THTensor_(maxall)(isTarget) <= 1, 3, "isTarget out of range"); - - target = THIndexTensor_(newContiguous)(target); - input = THTensor_(newContiguous)(input); - isTarget = THTensor_(newContiguous)(isTarget); - input_data = THTensor_(data)(input); - target_data = THIndexTensor_(data)(target); - isTarget_data = THTensor_(data)(isTarget); - - g = sizeAverage ? ( 1./((real)(nframe*dim)) ) : ( 1./((real)dim) ); - - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - gradInput_data = THTensor_(data)(gradInput); - - for (t = 0; t < nframe; t++) - { - for (dt = 0; dt < dim; dt++) - { - THIndex_t target_idx = target_data[dt] - TH_INDEX_BASE; - real input_target; - if (target_idx < 0) - break; - - input_target = input_data[target_idx]; - for (d = 0; d < dim; d++) - { - if (!isTarget_data[d]) - { - real z = 1 - input_target + input_data[d]; - if (z > 0) - { - gradInput_data[target_idx] -= g; - gradInput_data[d] += g; - } - } - } - } - input_data += dim; - target_data += dim; - isTarget_data += dim; - gradInput_data += dim; - } - - THTensor_(free)(input); - THIndexTensor_(free)(target); - THTensor_(free)(isTarget); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/MultiMarginCriterion.c b/contrib/lua-torch/nn/lib/THNN/generic/MultiMarginCriterion.c deleted file mode 100644 index 2f8f8ff58f..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/MultiMarginCriterion.c +++ /dev/null @@ -1,168 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/MultiMarginCriterion.c" -#else - -// TODO: improve error messages -void THNN_(MultiMarginCriterion_updateOutput)( - THNNState *state, - THTensor *input, - THIndexTensor *target, - THTensor *output, - bool sizeAverage, - int p, - THTensor *weights, - accreal margin_) -{ - real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_); - real *input_data, *weights_data; - THIndex_t *target_data; - long nframe, dim; - long t, d; - real sum; - - THArgCheck((input->nDimension == 1) || (input->nDimension == 2), 2, - "vector or matrix expected"); - - if (input->nDimension == 1) - { - nframe = 1; - dim = input->size[0]; - } - else - { - nframe = input->size[0]; - dim = input->size[1]; - THArgCheck((target->nDimension == 1) && (target->size[0] == nframe), 3, - "inconsistent target size"); - } - - for (t = 0; t < nframe; t++) - { - THIndex_t idx = THIndexTensor_(get1d)(target, t); - THArgCheck((idx >= TH_INDEX_BASE) && (idx < dim + TH_INDEX_BASE), 3, - "target out of range"); - } - - input = THTensor_(newContiguous)(input); - target = THIndexTensor_(newContiguous)(target); - weights = weights ? 
THTensor_(newContiguous)(weights) : NULL; - input_data = THTensor_(data)(input); - target_data = THIndexTensor_(data)(target); - weights_data = weights ? THTensor_(data)(weights) : NULL; - - sum = 0; - for (t = 0; t < nframe; t++) - { - THIndex_t target_idx = target_data[t] - TH_INDEX_BASE; - real input_target = input_data[target_idx]; - for (d = 0; d < dim; d++) - { - real z = margin - input_target + input_data[d]; - if (d == target_idx) - continue; - - if (z > 0) { - real h = (p==1) ? z : z*z; - if(weights_data) - h *= weights_data[target_idx]; - sum += h; - } - } - input_data += dim; - } - - sum /= dim; - if(sizeAverage) - sum /= nframe; - - THTensor_(set1d)(output, 0, sum); - - THTensor_(free)(input); - THIndexTensor_(free)(target); - if(weights) - THTensor_(free)(weights); -} - -void THNN_(MultiMarginCriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THIndexTensor *target, - THTensor *gradInput, - bool sizeAverage, - int p, - THTensor *weights, - accreal margin_) -{ - real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_); - real *input_data; - real *gradInput_data; - THIndex_t *target_data; - real *weights_data; - long nframe, dim; - long t, d; - real g; - - THArgCheck((input->nDimension == 1) || (input->nDimension == 2), 2, - "vector or matrix expected"); - - if (input->nDimension == 1) - { - nframe = 1; - dim = input->size[0]; - } - else - { - nframe = input->size[0]; - dim = input->size[1]; - THArgCheck((target->nDimension == 1) && (target->size[0] == nframe), 3, - "inconsistent target size"); - } - - g = (sizeAverage ? 1./((real)(nframe*dim)) : 1./((real)dim)); - - input = THTensor_(newContiguous)(input); - target = THIndexTensor_(newContiguous)(target); - input_data = THTensor_(data)(input); - - THTensor_(resizeAs)(gradInput, input); - gradInput_data = THTensor_(data)(gradInput); - - target_data = THIndexTensor_(data)(target); - weights = weights ? THTensor_(newContiguous)(weights) : NULL; - weights_data = weights ? THTensor_(data)(weights) : NULL; - - for (t = 0; t < nframe; t++) - { - THIndex_t target_idx = target_data[t] - TH_INDEX_BASE; - real input_target = input_data[target_idx]; - real gradInput_target = 0; - for (d = 0; d < dim; d++) - { - real z = margin - input_target + input_data[d]; - if (d == target_idx) - continue; - - if (z > 0) - { - real h = (p == 1) ? g : 2*g*z; - if(weights_data) - h *= weights_data[target_idx]; - gradInput_target -= h; - gradInput_data[d] = h; - } - else - gradInput_data[d] = 0; - } - gradInput_data[target_idx] = gradInput_target; - - input_data += dim; - gradInput_data += dim; - } - - THTensor_(free)(input); - THIndexTensor_(free)(target); - if(weights) - THTensor_(free)(weights); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/PReLU.c b/contrib/lua-torch/nn/lib/THNN/generic/PReLU.c deleted file mode 100644 index 488322fde9..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/PReLU.c +++ /dev/null @@ -1,207 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/PReLU.c" -#else - -void THNN_(PReLU_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THIndex_t nOutputPlane) -{ - THTensor_(resizeAs)(output, input); - - if (nOutputPlane == 0) - { - // handle shared parameter case - real w = *THTensor_(data)(weight); - TH_TENSOR_APPLY2(real, output, real, input, - *output_data = (*input_data > 0) ? 
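/* Minimal sketch (hypothetical helper) of the shared-parameter case on
 * this line: PReLU is a LeakyReLU whose negative slope w is learned, one
 * scalar here and one per output plane in the branch below. */
static double prelu_sketch(double x, double w)
{
  return x > 0 ? x : w * x;
}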
*input_data : w*(*input_data); - ); - } - else - { - input = THTensor_(newContiguous)(input); - long bs = 1, ks = 1; - { - long input_ndim = THTensor_(nDimension)(input); - if (input->size[input_ndim > 1] != nOutputPlane) - THError("Wrong number of input planes. Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]); - - if (input_ndim > 1) { - bs = input->size[0]; - for (int d = 2; d < input_ndim; d++) { - ks *= input->size[d]; - } - } - } - - real *output_data = THTensor_(data)(output); - real *input_data = THTensor_(data)(input); - real *weight_data = THTensor_(data)(weight); - THIndex_t i, j, k; -#pragma omp parallel for private(j,k) - for (i = 0; i < bs; ++i) - { - real* n_input_data = input_data + i*nOutputPlane*ks; - real* n_output_data = output_data + i*nOutputPlane*ks; - for (j = 0; j < nOutputPlane; ++j) - { - for (k = 0; k < ks; ++k) - n_output_data[k] = (n_input_data[k] > 0) ? n_input_data[k] : weight_data[j] * n_input_data[k]; - n_input_data += ks; - n_output_data += ks; - } - } - THTensor_(free)(input); - } -} - -void THNN_(PReLU_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THIndex_t nOutputPlane) -{ - THNN_CHECK_NELEMENT(input, gradOutput); - THTensor_(resizeAs)(gradInput, input); - - if (nOutputPlane == 0) - { - real w = THTensor_(data)(weight)[0]; - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, - if ((*input_data) > 0) - *gradInput_data = *gradOutput_data; - else - *gradInput_data = w * (*gradOutput_data); - ); - } - else - { - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - weight = THTensor_(newContiguous)(weight); - const real *input_data = THTensor_(data)(input); - const real *gradOutput_data = THTensor_(data)(gradOutput); - const real *weight_data = THTensor_(data)(weight); - real *gradInput_data = THTensor_(data)(gradInput); - - long bs = 1, ks = 1; - { - long input_ndim = THTensor_(nDimension)(input); - if (input->size[input_ndim > 1] != nOutputPlane) - THError("Wrong number of input planes. 
Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]); - - if (input_ndim > 1) { - bs = input->size[0]; - for (int d = 2; d < input_ndim; d++) { - ks *= input->size[d]; - } - } - } - - THIndex_t i, j, k; -#pragma omp parallel for private(j,k) - for (i = 0; i < bs; ++i) - { - const real *n_input_data = input_data + i*nOutputPlane*ks; - const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks; - real *n_gradInput_data = gradInput_data + i*nOutputPlane*ks; - - for (j = 0; j < nOutputPlane; ++j) - { - real w = weight_data[j]; - for (k = 0; k < ks; ++k) - { - if (n_input_data[k] > 0) - n_gradInput_data[k] = n_gradOutput_data[k]; - else - n_gradInput_data[k] = n_gradOutput_data[k] * w; - } - n_input_data += ks; - n_gradInput_data += ks; - n_gradOutput_data += ks; - } - } - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(weight); - } -} - -void THNN_(PReLU_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *gradWeight, - THTensor *gradWeightBuf, - THTensor *gradWeightBuf2, - THIndex_t nOutputPlane, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - THNN_CHECK_NELEMENT(input, gradOutput); - - if (nOutputPlane == 0) - { - real *gradWeight_data = THTensor_(data)(gradWeight); - real sum = 0; - TH_TENSOR_APPLY2(real, input, real, gradOutput, - if ((*input_data) <= 0) - sum += (*input_data) * (*gradOutput_data); - ); - gradWeight_data[0] += scale * sum; - } - else - { - THArgCheck(THTensor_(isContiguous)(gradWeight), 6, "gradWeight needs to be contiguous"); - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - weight = THTensor_(newContiguous)(weight); - long bs = 1, ks = 1; - { - long input_ndim = THTensor_(nDimension)(input); - if (input->size[input_ndim > 1] != nOutputPlane) - THError("Wrong number of input planes. 
Expected %d but got %d.", nOutputPlane, input->size[input_ndim > 1]); - - if (input_ndim > 1) { - bs = input->size[0]; - for (int d = 2; d < input_ndim; d++) { - ks *= input->size[d]; - } - } - } - - const real *input_data = THTensor_(data)(input); - const real *gradOutput_data = THTensor_(data)(gradOutput); - const real *weight_data = THTensor_(data)(weight); - real *gradWeight_data = THTensor_(data)(gradWeight); - - THIndex_t i, j, k; - for (i = 0; i < bs; ++i) - { - const real *n_input_data = input_data + i*nOutputPlane*ks; - const real *n_gradOutput_data = gradOutput_data + i*nOutputPlane*ks; - - for (j = 0; j < nOutputPlane; ++j) - { - real sum = 0; - for (k = 0; k < ks; ++k) - if (n_input_data[k] <= 0) - sum += n_gradOutput_data[k] * n_input_data[k]; - gradWeight_data[j] += scale * sum; - n_input_data += ks; - n_gradOutput_data += ks; - } - } - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(weight); - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/RReLU.c b/contrib/lua-torch/nn/lib/THNN/generic/RReLU.c deleted file mode 100644 index 8fd46d3c29..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/RReLU.c +++ /dev/null @@ -1,132 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/RReLU.c" -#else - -void THNN_(RReLU_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *noise, - accreal lower_, - accreal upper_, - bool train, - bool inplace, - THGenerator *generator) -{ - real lower = TH_CONVERT_ACCREAL_TO_REAL(lower_); - real upper = TH_CONVERT_ACCREAL_TO_REAL(upper_); - if (train) - { - // get default random generator - THTensor_(resizeAs)(noise, input); - if (inplace) - { - TH_TENSOR_APPLY2(real, input, real, noise, - if (*input_data <= 0) - { - const real r = (real)THRandom_uniform(generator, lower, upper); - *input_data = (*input_data) * r; - *noise_data = r; - } - else - { - *noise_data = 1; - } - ); - THTensor_(set)(output, input); - } - else - { - THTensor_(resizeAs)(output, input); - TH_TENSOR_APPLY3(real, input, real, output, real, noise, - if (*input_data <= 0) - { - const real r = (real)THRandom_uniform(generator, lower, upper); - *output_data = (*input_data) * r; - *noise_data = r; - } - else - { - *output_data = *input_data; - *noise_data = 1; - } - ); - } - } - else - { - const real negSlope = (lower + upper) / 2; - if (inplace) - { - TH_TENSOR_APPLY(real, input, - if (*input_data <= 0) - { - *input_data = *input_data * negSlope; - } - ); - THTensor_(set)(output, input); - } - else - { - THTensor_(resizeAs)(output, input); - TH_TENSOR_APPLY2(real, input, real, output, - const real r = (*input_data) <= 0 ? negSlope : 1; - *output_data = *input_data * r; - ); - } - } -} - -void THNN_(RReLU_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *noise, - accreal lower_, - accreal upper_, - bool train, - bool inplace) -{ - real lower = TH_CONVERT_ACCREAL_TO_REAL(lower_); - real upper = TH_CONVERT_ACCREAL_TO_REAL(upper_); - THNN_CHECK_NELEMENT(input, gradOutput); - if (train && upper - lower > 1E-6) // e.g. 
if upper == lower, RReLU behaves like LeakyReLU - { - // multiply the gradient by the noise tensor - if (inplace) - { - THTensor_(cmul)(gradOutput, gradOutput, noise); - THTensor_(set)(gradInput, gradOutput); - } - else - { - THTensor_(resizeAs)(gradInput, input); - THTensor_(cmul)(gradInput, gradOutput, noise); - } - } - else - { - // use constant factor for negative input values - const real negSlope = (lower + upper) / 2; - if (inplace) - { - TH_TENSOR_APPLY2(real, gradOutput, real, input, - if (*input_data <= 0) - { - *gradOutput_data = (*gradOutput_data) * negSlope; - } - ); - THTensor_(set)(gradInput, gradOutput); - } - else - { - THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, - *gradInput_data = (*input_data) <= 0 ? (*gradOutput_data) * negSlope : (*gradOutput_data); - ); - } - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/Sigmoid.c b/contrib/lua-torch/nn/lib/THNN/generic/Sigmoid.c deleted file mode 100644 index 17fb2cb4db..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/Sigmoid.c +++ /dev/null @@ -1,28 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/Sigmoid.c" -#else - -void THNN_(Sigmoid_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output) -{ - THTensor_(sigmoid)(output, input); -} - -void THNN_(Sigmoid_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output) -{ - THNN_CHECK_NELEMENT(output, gradOutput); - THTensor_(resizeAs)(gradInput, output); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, - real z = *output_data; - *gradInput_data = *gradOutput_data * (1. - z) * z; - ); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SmoothL1Criterion.c b/contrib/lua-torch/nn/lib/THNN/generic/SmoothL1Criterion.c deleted file mode 100644 index d1928d11ce..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SmoothL1Criterion.c +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SmoothL1Criterion.c" -#else - -void THNN_(SmoothL1Criterion_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *output, - bool sizeAverage) -{ - THNN_CHECK_NELEMENT(input, target); - THNN_CHECK_DIM_SIZE(output, 1, 0, 1); - - real sum = 0; - TH_TENSOR_APPLY2(real, input, real, target, - real z = fabs(*input_data - *target_data); - sum += z < 1 ? 0.5*z*z : z - 0.5; - ); - - if (sizeAverage) - sum /= THTensor_(nElement)(input); - - THTensor_(set1d)(output, 0, sum); -} - -void THNN_(SmoothL1Criterion_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *gradInput, - bool sizeAverage) -{ - THNN_CHECK_NELEMENT(input, target); - real norm = (sizeAverage ? 1./((real)THTensor_(nElement)(input)) : 1.); - - THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, input, real, target, - real x = *input_data - *target_data; - if (x < -1.) - *gradInput_data = - norm; - else if (x > 1.) 
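/* Sketch of the Huber-style rule used here (illustrative helper): smooth
 * L1 is 0.5*x^2 for |x| < 1 and |x| - 0.5 otherwise, so its derivative is
 * x clamped to [-1, 1], scaled by the (possibly size-averaged) norm. */
static double smooth_l1_grad_sketch(double x, double norm)
{
  if (x < -1.0) return -norm;
  if (x > 1.0) return norm;
  return norm * x;
}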
- *gradInput_data = norm; - else - *gradInput_data = norm * x; - ); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SoftMarginCriterion.c b/contrib/lua-torch/nn/lib/THNN/generic/SoftMarginCriterion.c deleted file mode 100644 index bac0a3b539..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SoftMarginCriterion.c +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SoftMarginCriterion.c" -#else - -void THNN_(SoftMarginCriterion_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *output, - bool sizeAverage) -{ - THNN_CHECK_NELEMENT(input, target); - THNN_CHECK_DIM_SIZE(output, 1, 0, 1); - - real sum; - - sum = 0; - TH_TENSOR_APPLY2(real, input, real, target, - real z = log(1. + exp(-*input_data* *target_data)); - sum += z;) - - if(sizeAverage) - sum /= THTensor_(nElement)(input); - - THTensor_(set1d)(output, 0, sum); -} - -void THNN_(SoftMarginCriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *gradInput, - bool sizeAverage) -{ - THNN_CHECK_NELEMENT(input, target); - real norm = (sizeAverage ? 1./((real)THTensor_(nElement)(input)) : 1.); - - THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, input, real, target, - real z = exp(-*target_data * *input_data); - *gradInput_data = -norm*(*target_data)*z/(1. + z);) -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SoftMax.c b/contrib/lua-torch/nn/lib/THNN/generic/SoftMax.c deleted file mode 100644 index 7b60d64c2f..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SoftMax.c +++ /dev/null @@ -1,150 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SoftMax.c" -#else - -void THNN_(SoftMax_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output) -{ - real *input_data, *output_data; - ptrdiff_t nframe = 0, dim = 0, stride = 0; - ptrdiff_t t; - - if (input->nDimension == 1) - { - nframe = 1; - dim = input->size[0]; - stride = 1; - } - else if (input->nDimension == 2) - { - nframe = input->size[0]; - dim = input->size[1]; - stride = 1; - } - else if (input->nDimension == 3) - { - nframe = 1; - dim = input->size[0]; - stride = input->size[1]*input->size[2]; - } - else if (input->nDimension == 4) - { - nframe = input->size[0]; - dim = input->size[1]; - stride = input->size[2]*input->size[3]; - } - else - { - THArgCheck(0, 2, "1D, 2D, 3D or 4D tensor expected"); - } - - input = THTensor_(newContiguous)(input); - THTensor_(resizeAs)(output, input); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - -#pragma omp parallel for private(t) - for (t = 0; t < stride*nframe; t++) - { - real *input_ptr = input_data + (t/stride)*dim*stride + t % stride; - real *output_ptr = output_data + (t/stride)*dim*stride + t % stride; - - real inputMax = -THInf; - accreal sum; - - ptrdiff_t d; - for (d = 0; d < dim; d++) - { - if (input_ptr[d*stride] >= inputMax) inputMax = input_ptr[d*stride]; - } - - sum = 0; - for (d = 0; d < dim; d++) - { - real z = exp(input_ptr[d*stride] - inputMax); - output_ptr[d*stride] = z; - sum += z; - } - - for (d = 0; d < dim; d++) - { - output_ptr[d*stride] *= 1/sum; - } - } - - THTensor_(free)(input); -} - -void THNN_(SoftMax_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output) -{ - THNN_CHECK_SHAPE(input, gradOutput); - real *gradInput_data, *gradOutput_data, *output_data; - ptrdiff_t nframe = 0, dim = 0, stride = 0; - 
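/* Standalone sketch (assumed names) of the numerically stable softmax
 * computed in updateOutput above: subtracting the running maximum before
 * exponentiating keeps exp() from overflowing, and the normalization
 * cancels the shift exactly. */
#include <math.h>
static void softmax_sketch(const double *x, double *y, long dim)
{
  long d;
  double max = x[0], sum = 0;
  for (d = 1; d < dim; d++)
    if (x[d] > max) max = x[d];
  for (d = 0; d < dim; d++)
  {
    y[d] = exp(x[d] - max);
    sum += y[d];
  }
  for (d = 0; d < dim; d++)
    y[d] /= sum;
}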
ptrdiff_t t; - - if (output->nDimension == 1) - { - nframe = 1; - dim = output->size[0]; - stride = 1; - } - else if (output->nDimension == 2) - { - nframe = output->size[0]; - dim = output->size[1]; - stride = 1; - } - else if (output->nDimension == 3) - { - nframe = 1; - dim = output->size[0]; - stride = output->size[1]*output->size[2]; - } - else if (output->nDimension == 4) - { - nframe = output->size[0]; - dim = output->size[1]; - stride = output->size[2]*output->size[3]; - } - else - { - THError("1D, 2D, 3D or 4D tensor expected"); - } - - gradOutput = THTensor_(newContiguous)(gradOutput); - output = THTensor_(newContiguous)(output); - - THTensor_(resizeAs)(gradInput, output); - gradInput_data = THTensor_(data)(gradInput); - output_data = THTensor_(data)(output); - gradOutput_data = THTensor_(data)(gradOutput); - -#pragma omp parallel for private(t) - for (t = 0; t < stride*nframe; t++) - { - real *gradInput_ptr = gradInput_data + (t/stride)*dim*stride + t % stride; - real *output_ptr = output_data + (t/stride)*dim*stride + t % stride; - real *gradOutput_ptr = gradOutput_data + (t/stride)*dim*stride + t % stride; - - ptrdiff_t d; - accreal sum = 0; - for (d = 0; d < dim; d++) - sum += (accreal)gradOutput_ptr[d*stride] * output_ptr[d*stride]; - - for (d = 0; d < dim; d++) - gradInput_ptr[d*stride] = output_ptr[d*stride] * (gradOutput_ptr[d*stride] - sum); - } - - THTensor_(free)(gradOutput); - THTensor_(free)(output); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SoftPlus.c b/contrib/lua-torch/nn/lib/THNN/generic/SoftPlus.c deleted file mode 100644 index 6491e66d63..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SoftPlus.c +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SoftPlus.c" -#else - -void THNN_(SoftPlus_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal beta_, - accreal threshold_) -{ - real beta = TH_CONVERT_ACCREAL_TO_REAL(beta_); - real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_); - THTensor_(resizeAs)(output, input); - - // f(x) = 1/beta * log(1 + exp(beta * x)) - TH_TENSOR_APPLY2(real, output, real, input, \ - *output_data = (*input_data * beta) > threshold ? *input_data : THLog1p(exp(*input_data * beta)) / beta; - ); -} - -void THNN_(SoftPlus_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output, - accreal beta_, - accreal threshold_) -{ - real beta = TH_CONVERT_ACCREAL_TO_REAL(beta_); - real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_); - THNN_CHECK_NELEMENT(input, gradOutput); - THTensor_(resizeAs)(gradInput, output); - - // d/dx[log(1+exp(k*x))/k] = exp(kx) / (exp(kx) + 1) - // SINCE - // y = (1/k)*log(1+exp(k*x)) --> x = (1/k)*log(exp(k*y)-1) - // THEREFORE: - // d/dx(f(x)) = (exp(k*y) - 1) / exp(k*y) - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, - real z = exp(*output_data * beta); - *gradInput_data = (*output_data * beta) > threshold ? 
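SoftMax above follows the standard numerically stable recipe: subtract the slice maximum before exponentiating, normalize by the sum, and backpropagate with gradInput = output * (gradOutput - dot(gradOutput, output)), which is exactly the accumulated sum in the parallel loop. A self-contained sketch over a flat array (hypothetical helpers, double in place of the real typedef):

#include <math.h>
#include <stddef.h>

/* Stable softmax forward over a 1-D slice of length n. */
static void softmax(const double *x, double *y, size_t n)
{
    double m = x[0], s = 0;
    for (size_t i = 1; i < n; i++) if (x[i] > m) m = x[i];   /* shift by max */
    for (size_t i = 0; i < n; i++) { y[i] = exp(x[i] - m); s += y[i]; }
    for (size_t i = 0; i < n; i++) y[i] /= s;
}

/* Backward: gi = y * (go - <go, y>), matching the two-pass loop above. */
static void softmax_grad(const double *y, const double *go, double *gi, size_t n)
{
    double dot = 0;
    for (size_t i = 0; i < n; i++) dot += go[i] * y[i];
    for (size_t i = 0; i < n; i++) gi[i] = y[i] * (go[i] - dot);
}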
*gradOutput_data : *gradOutput_data * (z - 1.)/z; - ); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SoftShrink.c b/contrib/lua-torch/nn/lib/THNN/generic/SoftShrink.c deleted file mode 100644 index e779508689..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SoftShrink.c +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SoftShrink.c" -#else - -void THNN_(SoftShrink_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal lambda_) -{ - real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_); - THTensor_(resizeAs)(output, input); - - TH_TENSOR_APPLY2(real, output, real, input, - if ((*input_data) > lambda) - *output_data = *input_data - lambda; - else if ((*input_data) < -lambda) - *output_data = *input_data + lambda; - else - *output_data = 0; - ); -} - -void THNN_(SoftShrink_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - accreal lambda_) -{ - real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_); - THNN_CHECK_NELEMENT(input, gradOutput); - THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, - if ((*input_data) > lambda || (*input_data) < -lambda) - *gradInput_data = (*gradOutput_data); - else - *gradInput_data = 0; - ); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SparseLinear.c b/contrib/lua-torch/nn/lib/THNN/generic/SparseLinear.c deleted file mode 100644 index 1cf7122126..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SparseLinear.c +++ /dev/null @@ -1,564 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SparseLinear.c" -#else - -#ifdef _OPENMP -#include -#endif - -#define ROW_PTR2(t, r) (THTensor_(data)(t) + (r) * (t)->stride[0]) -#define COL_PTR2(t, c) (THTensor_(data)(t) + (c) * (t)->stride[1]) - -static bool THNN_(checkLegacyInput)(THTensor* t) -{ - return t->nDimension == 3 && t->size[2] == 2; -} - -static bool THNN_(checkInput)(THTensor* t) -{ - return t->nDimension == 2 && t->size[1] == 3; -} - -static bool THNN_(checkSize2D)(THTensor* t, long size0, long size1) -{ - return t->nDimension == 2 && t->size[0] == size0 && t->size[1] == size1; -} - -static bool THNN_(checkSize1D)(THTensor* t, long size0) -{ - return t->nDimension == 1 && t->size[0] == size0; -} - -static void THNN_(set1d)(THTensor *t, long x0, real value) { - THStorage_(set)(t->storage, t->storageOffset + x0*t->stride[0], value); -} -static real THNN_(get3d)(const THTensor *t, long x0, long x1, long x2) { - return THStorage_(get)(t->storage, t->storageOffset + - x0*t->stride[0] + x1*t->stride[1] + x2*t->stride[2]); -} -static real THNN_(get2d)(const THTensor *t, long x0, long x1) { - return THStorage_(get)(t->storage, t->storageOffset + - x0*t->stride[0] + x1*t->stride[1]); -} - -void THNN_(SparseLinear_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias) -{ - long h, i, j, hp0, hp1; - long outDim = THTensor_(size)(weight, 0); - long inDim = THTensor_(size)(weight, 1); - long batchSize = THTensor_(size)(output, 0); - - THArgCheck(THNN_(checkInput)(input), 2, "input must be in coo format, nnz x 3"); - THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous"); - THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong"); - - long nnz = THTensor_(size)(input, 0); - - THLongTensor * csr = THLongTensor_newWithSize1d(batchSize+1); - THLongTensor_zero(csr); - - weight = THTensor_(newContiguous)(weight); - 
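SoftPlus and SoftShrink complete the same pattern: SoftPlus switches to the identity once beta*x exceeds the threshold to avoid overflow and reconstructs its gradient from the saved output, and SoftShrink is plain soft-thresholding with a dead zone of width 2*lambda. A scalar sketch under those formulas:

#include <math.h>

/* SoftPlus: f(x) = log1p(exp(beta*x)) / beta, linear past the threshold. */
static double softplus(double x, double beta, double threshold)
{
    return x * beta > threshold ? x : log1p(exp(x * beta)) / beta;
}

/* Gradient recovered from the saved output y: exp(beta*y) = 1 + exp(beta*x). */
static double softplus_grad(double y, double go, double beta, double threshold)
{
    if (y * beta > threshold) return go;
    double z = exp(y * beta);
    return go * (z - 1.) / z;
}

/* SoftShrink: shrink toward zero by lambda, zero inside [-lambda, lambda]. */
static double softshrink(double x, double lambda)
{
    if (x >  lambda) return x - lambda;
    if (x < -lambda) return x + lambda;
    return 0;
}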
-//#pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000) - for (i=0; i<nnz; i++) { - hp0 = (long)(THNN_(get2d)(input, i, 0)) - 1; - hp1 = (i+1 == nnz) ? - batchSize : - (long)(THNN_(get2d)(input, i+1, 0)) - 1; - if (hp0 != hp1) for (h = hp0; h < hp1; h++) { - THLongTensor_set1d(csr, h+1, i+1); - } - } - - // output = weight * input + bias - THTensor_(resize2d)(output, batchSize, outDim); - THTensor_(zero)(output); -#pragma omp parallel for private(h, i) schedule(static) if (nnz > 10000) - for (h = 0; h < batchSize; h++) { - long i_start = THLongTensor_get1d(csr, h); - long i_end = THLongTensor_get1d(csr, h+1); - for (i = i_start; i < i_end; i++) { - real val = THNN_(get2d)(input, i, 2); - if (val == 0) { - continue; - } - - long offset = (long)(THNN_(get2d)(input, i, 1)) - 1; - if (offset >= 0 && offset < inDim) { - THBlas_(axpy)(outDim, - val, - COL_PTR2(weight, offset), weight->stride[0], - ROW_PTR2(output, h), output->stride[1]); - } else { - THError("index out of bound. updateOutput: %d not between 1 and %d", - offset + 1, inDim); - } - } - } - - THTensor* output_row = THTensor_(new)(); - for (h = 0; h < batchSize; h++) { - THTensor_(select)(output_row, output, 0, h); - THTensor_(cadd)(output_row, bias, 1.0, output_row); - } - THTensor_(free)(output_row); - THLongTensor_free(csr); - THTensor_(free)(weight); -} - -void THNN_(SparseLinear_legacyUpdateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias) -{ - long h, i; - long outDim = THTensor_(size)(weight, 0); - long inDim = THTensor_(size)(weight, 1); - - THArgCheck(THNN_(checkLegacyInput)(input), 2, "input size must be batchsize x nnz x 2"); - THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous"); - THArgCheck(THNN_(checkSize1D)(bias, outDim), 5, "bias size wrong"); - - weight = THTensor_(newContiguous)(weight); - - long batchSize = THTensor_(size)(input, 0); - long nnz = THTensor_(size)(input, 1); - THTensor_(resize2d)(output, batchSize, outDim); - - // output = weight * input + bias - THTensor_(zero)(output); -#pragma omp parallel for private(h, i) schedule(static) if ( \ - batchSize > 1 && batchSize * nnz * outDim > 10000) - for (h = 0; h < batchSize; h++) { - for (i = 0; i < nnz; i++) { - real val = THNN_(get3d)(input, h, i, 1); - if (val == 0) { - continue; - } - - long offset = (long)(THNN_(get3d)(input, h, i, 0)) - 1; - if (offset >= 0 && offset < inDim) { - THBlas_(axpy)(outDim, - val, - COL_PTR2(weight, offset), weight->stride[0], - ROW_PTR2(output, h), output->stride[1]); - } else { - THError("index out of bound. 
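SparseLinear stores its input as nnz x 3 COO triplets (row, column, value) with 1-based indices, and the forward pass is one axpy of a weight column per nonzero, exactly as in the loop above. A dense-pointer sketch of that accumulation (hypothetical sparse_linear_forward, skipping the CSR bucketing and OpenMP scheduling):

#include <stddef.h>

/* y = W x + b from COO triplets; coo is nnz x 3 rows of (row, col, val),
 * both indices 1-based as in the THNN format. W is out_dim x in_dim row-major. */
static void sparse_linear_forward(const double *coo, size_t nnz,
                                  const double *W, const double *b,
                                  double *y, size_t batch,
                                  size_t out_dim, size_t in_dim)
{
    for (size_t r = 0; r < batch; r++)           /* preload the bias */
        for (size_t o = 0; o < out_dim; o++)
            y[r * out_dim + o] = b[o];
    for (size_t i = 0; i < nnz; i++) {
        size_t row = (size_t)coo[i * 3 + 0] - 1; /* 1-based -> 0-based */
        size_t col = (size_t)coo[i * 3 + 1] - 1;
        double val = coo[i * 3 + 2];
        if (val == 0 || col >= in_dim) continue;
        for (size_t o = 0; o < out_dim; o++)     /* the per-nonzero axpy */
            y[row * out_dim + o] += val * W[o * in_dim + col];
    }
}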
updateOutput: %d not between 1 and %d", - offset + 1, inDim); - } - } - } - - THTensor* output_row = THTensor_(new)(); - for (h = 0; h < batchSize; h++) { - THTensor_(select)(output_row, output, 0, h); - THTensor_(cadd)(output_row, bias, 1.0, output_row); - } - THTensor_(free)(output_row); - THTensor_(free)(weight); -} - -void THNN_(SparseLinear_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *weight, - THTensor *bias, - accreal weightDecay_, - accreal scale_) -{ - real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_); - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - long h, i, col, hp0, hp1; - long outDim = THTensor_(size)(weight, 0); - long inDim = THTensor_(size)(weight, 1); - - THArgCheck(THNN_(checkInput)(input), 2, - "input must be in coo format, nnz x 3"); - THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4, - "gradWeight size wrong"); - THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, - "gradBias size wrong"); - THArgCheck(THTensor_(isContiguous)(gradOutput), 1, - "gradOutput must be contiguous"); - - long nnz = THTensor_(size)(input, 0); - - THLongTensor* csc = THLongTensor_newWithSize1d(inDim+1); - THLongTensor_zero(csc); - weight = THTensor_(newContiguous)(weight); - -#pragma omp parallel for private(i, h, hp0, hp1) schedule(static) if (nnz > 10000) - for (i = 0; i < nnz; i++) { - hp0 = (long)(THNN_(get2d)(input, i, 1)) - 1; - hp1 = (i+1 == nnz) ? - inDim : - (long)(THNN_(get2d)(input, i+1, 1)) - 1; - if (hp0 != hp1) for (h = hp0; h < hp1; h++) { - THLongTensor_set1d(csc, h+1, i+1); - } - } - - // gradWeight += gradOutput * input -#pragma omp parallel for private(h, i, col) schedule(static) if (nnz > 10000) - for (col = 0; col < inDim; col++) { - long i_start = THLongTensor_get1d(csc, col); - long i_end = THLongTensor_get1d(csc, col+1); - for (i = i_start; i < i_end; i++) { - real val = scale * THNN_(get2d)(input, i, 2); - - h = (long)(THNN_(get2d)(input, i, 0)) - 1; - long offset = (long)(THNN_(get2d)(input, i, 1)) - 1; - if (offset >= 0 && offset < inDim) { - THBlas_(axpy)(outDim, - val, - ROW_PTR2(gradOutput, h), gradOutput->stride[1], - COL_PTR2(gradWeight, offset), gradWeight->stride[0]); - } else { - THError( - "index out of bound. 
accGradParameters: %d not between 1 and %d", - offset + 1, - inDim); - } - } - } - - // gradBias += gradOutput - THTensor* buf = THTensor_(new)(); - THTensor_(sum)(buf, gradOutput, 0, 1); - THTensor_(cadd)(gradBias, gradBias, scale, buf); - THTensor_(free)(buf); - THLongTensor_free(csc); - - if (weightDecay != 0) { - THTensor_(cadd)(gradWeight, gradWeight, weightDecay, weight); - } - THTensor_(free)(weight); -} - -void THNN_(SparseLinear_legacyAccGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *weight, - THTensor *bias, - accreal weightDecay_, - accreal scale_) -{ - real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_); - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - long h, i; - long outDim = THTensor_(size)(weight, 0); - long inDim = THTensor_(size)(weight, 1); - - THArgCheck(THNN_(checkLegacyInput)(input), 2, - "input size must be batchsize x nnz x 2"); - THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4, - "gradWeight size wrong"); - THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, - "gradBias size wrong"); - THArgCheck(THTensor_(isContiguous)(gradOutput), 1, - "gradOutput must be contiguous"); - - long batchSize = THTensor_(size)(input, 0); - long nnz = THTensor_(size)(input, 1); - THTensor_(resize2d)(gradOutput, batchSize, outDim); - - // gradWeight += gradOutput * input -#pragma omp parallel for private(h, i) schedule(static) if (\ - batchSize * nnz * outDim > 10000) - for (i = 0; i < nnz; i++) { - for (h = 0; h < batchSize; h++) { - real val = scale * THNN_(get3d)(input, h, i, 1); - if (val == 0) { - continue; - } - - long offset = (long)(THNN_(get3d)(input, h, i, 0)) - 1; - if (offset >= 0 && offset < inDim) { - THBlas_(axpy)(outDim, - val, - ROW_PTR2(gradOutput, h), gradOutput->stride[1], - COL_PTR2(gradWeight, offset), gradWeight->stride[0]); - } else { - THError( - "index out of bound. accGradParameters: %d not between 1 and %d", - offset + 1, - inDim); - } - } - } - - // gradBias += gradOutput - THTensor* gradOutput_row = THTensor_(new)(); - for (h = 0; h < batchSize; h++) { - THTensor_(select)(gradOutput_row, gradOutput, 0, h); - THTensor_(cadd)(gradBias, gradBias, scale, gradOutput_row); - } - THTensor_(free)(gradOutput_row); - - if (weightDecay != 0) { - THTensor_(cadd)(gradWeight, gradWeight, weightDecay, weight); - } -} - -void THNN_(SparseLinear_updateParameters)( - THNNState *state, - THTensor *weight, - THTensor *bias, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *lastInput, - accreal learningRate_) -{ - real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_); - long h, i; - long outDim = weight->size[0]; - long inDim = weight->size[1]; - - THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4, - "gradWeight size wrong"); - THArgCheck(THNN_(checkSize1D)(bias, outDim), 3, "bias size wrong"); - THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong"); - THArgCheck(THNN_(checkInput)(lastInput), 6, - "input must be in coo format, nnz x 3"); - - - long nnz = THTensor_(size)(lastInput, 0); - - // collect unique offsets of non-0 val in input - THTensor* offsets = THTensor_(newWithSize1d)(nnz); - long cnt = 0; - for (i = 0; i < nnz; i++) { - real val = THNN_(get2d)(lastInput, i, 2); - if (val == 0) { - continue; - } - long offset = (long)(THNN_(get2d)(lastInput, i, 1)) - 1; - if (offset >= 0 && offset < inDim) { - THNN_(set1d)(offsets, cnt++, offset); - } else { - THError( - "index out of bound. 
updateParameters: %d not between 1 and %d", - offset + 1, - inDim); - } - } - if (cnt == 0) return; - THTensor_(resize1d)(offsets, cnt); - - THTensor* uniqueOffsets = THTensor_(new)(); - THLongTensor* ri = THLongTensor_new(); - THTensor_(sort)(uniqueOffsets, ri, offsets, 0, 0); - THLongTensor_free(ri); - THTensor_(free)(offsets); - - cnt = 1; - real* uniqueOffsets_p = THTensor_(data)(uniqueOffsets); - for (i = 1; i < THTensor_(size)(uniqueOffsets, 0); i++) { - if (uniqueOffsets_p[i] != uniqueOffsets_p[i - 1]) { - uniqueOffsets_p[cnt++] = uniqueOffsets_p[i]; - } - } - THTensor_(resize1d)(uniqueOffsets, cnt); - - // weight += -learningRate * gradWeight - THTensor_(cadd)(bias, bias, -learningRate, gradBias); -#pragma omp parallel for private(i) schedule(static) if (cnt * outDim > 10000) - for (i = 0; i < cnt; i++) { - long offset = (long)uniqueOffsets_p[i]; - THBlas_(axpy)(outDim, - -learningRate, - COL_PTR2(gradWeight, offset), gradWeight->stride[0], - COL_PTR2(weight, offset), weight->stride[0]); - } - - THTensor_(free)(uniqueOffsets); -} - -void THNN_(SparseLinear_legacyUpdateParameters)( - THNNState *state, - THTensor *weight, - THTensor *bias, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *lastInput, - accreal learningRate_) -{ - real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_); - long h, i; - long outDim = weight->size[0]; - long inDim = weight->size[1]; - - THArgCheck(THNN_(checkSize2D)(gradWeight, outDim, inDim), 4, - "gradWeight size wrong"); - THArgCheck(THNN_(checkSize1D)(bias, outDim), 3, "bias size wrong"); - THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 5, "gradBias size wrong"); - THArgCheck(THNN_(checkLegacyInput)(lastInput), 6, - "input size must be batchsize x nnz x 2"); - - - long batchSize = THTensor_(size)(lastInput, 0); - long nnz = THTensor_(size)(lastInput, 1); - - // collect unique offsets of non-0 val in input - THTensor* offsets = THTensor_(newWithSize1d)(batchSize * nnz); - long cnt = 0; - for (h = 0; h < batchSize; h++) { - for (i = 0; i < nnz; i++) { - real val = THNN_(get3d)(lastInput, h, i, 1); - if (val == 0 ) { - continue; - } - long offset = (long)(THNN_(get3d)(lastInput, h, i, 0)) - 1; - if (offset >= 0 && offset < inDim) { - THNN_(set1d)(offsets, cnt++, offset); - } else { - THError( - "index out of bound. 
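The updateParameters variants above avoid touching the whole weight matrix: they collect the column offsets that appeared in the last sparse input, sort and deduplicate them, and then apply one axpy per unique column. A compact sketch of that dedup-and-update step using qsort (hypothetical sparse_sgd_step, row-major dense weights):

#include <stdlib.h>

static int cmp_long(const void *a, const void *b)
{
    long x = *(const long *)a, y = *(const long *)b;
    return (x > y) - (x < y);
}

/* W -= lr * gradW, restricted to the unique columns listed in offsets[0..n). */
static void sparse_sgd_step(double *W, const double *gradW,
                            long out_dim, long in_dim,
                            long *offsets, long n, double lr)
{
    qsort(offsets, (size_t)n, sizeof *offsets, cmp_long);
    long cnt = n ? 1 : 0;
    for (long i = 1; i < n; i++)               /* in-place unique() */
        if (offsets[i] != offsets[i - 1]) offsets[cnt++] = offsets[i];
    for (long i = 0; i < cnt; i++) {
        long col = offsets[i];
        for (long o = 0; o < out_dim; o++)     /* axpy down one column */
            W[o * in_dim + col] -= lr * gradW[o * in_dim + col];
    }
}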
updateParameters: %d not between 1 and %d", - offset + 1, - inDim); - } - } - } - THTensor_(resize1d)(offsets, cnt); - - THTensor* uniqueOffsets = THTensor_(new)(); - THLongTensor* ri = THLongTensor_new(); - THTensor_(sort)(uniqueOffsets, ri, offsets, 0, 0); - THLongTensor_free(ri); - THTensor_(free)(offsets); - - cnt = 1; - real* uniqueOffsets_p = THTensor_(data)(uniqueOffsets); - for (i = 1; i < THTensor_(size)(uniqueOffsets, 0); i++) { - if (uniqueOffsets_p[i] != uniqueOffsets_p[i - 1]) { - uniqueOffsets_p[cnt++] = uniqueOffsets_p[i]; - } - } - THTensor_(resize1d)(uniqueOffsets, cnt); - - // weight += -learningRate * gradWeight - THTensor_(cadd)(bias, bias, -learningRate, gradBias); -#pragma omp parallel for private(i) schedule(static) if (cnt * outDim > 10000) - for (i = 0; i < cnt; i++) { - long offset = (long)uniqueOffsets_p[i]; - THBlas_(axpy)(outDim, - -learningRate, - COL_PTR2(gradWeight, offset), gradWeight->stride[0], - COL_PTR2(weight, offset), weight->stride[0]); - } - - THTensor_(free)(uniqueOffsets); -} - -void THNN_(SparseLinear_zeroGradParameters)( - THNNState *state, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *lastInput) -{ - long h, i, j; - - long outDim = gradWeight->size[0]; - long inDim = gradWeight->size[1]; - - THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 3, "gradBias size wrong"); - THArgCheck(THNN_(checkInput)(lastInput), 4, - "input must be in coo format, nnz x 3"); - - THTensor_(zero)(gradBias); - - long nnz = THTensor_(size)(lastInput, 0); - -#pragma omp parallel for private(i, j) schedule(static) if ( \ - nnz * outDim > 10000) - for (i = 0; i < nnz; i++) { - if (THNN_(get2d)(lastInput, i, 2) == 0 ) { - continue; - } - - long offset = (long)(THNN_(get2d)(lastInput, i, 1)) - 1; - if (offset >= 0 && offset < inDim) { - real* pGradWeight = COL_PTR2(gradWeight, offset); - if (gradWeight->stride[0] == 1) { - THVector_(fill)(pGradWeight, 0, outDim); - } else { - long stride = gradWeight->stride[0]; - for (j = 0; j < outDim; ++j) { - pGradWeight[j * stride] = 0; - } - } - } else { - THError( - "index out of bound. zeroGradParameters: %d not between 1 and %d", - offset + 1, - inDim); - } - } -} - -void THNN_(SparseLinear_legacyZeroGradParameters)( - THNNState *state, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *lastInput) -{ - long h, i, j; - - long outDim = gradWeight->size[0]; - long inDim = gradWeight->size[1]; - - THArgCheck(THNN_(checkSize1D)(gradBias, outDim), 3, "gradBias size wrong"); - THArgCheck(THNN_(checkLegacyInput)(lastInput), 4, - "input size must be batchsize x nnz x 2"); - - THTensor_(zero)(gradBias); - - long batchSize = THTensor_(size)(lastInput, 0); - long nnz = THTensor_(size)(lastInput, 1); - -#pragma omp parallel for private(h, i, j) schedule(static) if ( \ - batchSize > 1 && batchSize * nnz * outDim > 10000) - for (h = 0; h < batchSize; h++) { - for (i = 0; i < nnz; i++) { - if (THNN_(get3d)(lastInput, h, i, 1) == 0 ) { - continue; - } - - long offset = (long)(THNN_(get3d)(lastInput, h, i, 0)) - 1; - if (offset >= 0 && offset < inDim) { - real* pGradWeight = COL_PTR2(gradWeight, offset); - if (gradWeight->stride[0] == 1) { - THVector_(fill)(pGradWeight, 0, outDim); - } else { - long stride = gradWeight->stride[0]; - for (j = 0; j < outDim; ++j) { - pGradWeight[j * stride] = 0; - } - } - } else { - THError( - "index out of bound. 
zeroGradParameters: %d not between 1 and %d", - offset + 1, - inDim); - } - } - } -} - -#undef ROW_PTR2 -#undef COL_PTR2 - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialAdaptiveAveragePooling.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialAdaptiveAveragePooling.c deleted file mode 100644 index 3675b42d78..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialAdaptiveAveragePooling.c +++ /dev/null @@ -1,258 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialAdaptiveAveragePooling.c" -#else - -#define START_IND(a,b,c) (int)floor((float)(a * c) / b) -#define END_IND(a,b,c) (int)ceil((float)((a + 1) * c) / b) -// #define START_IND(a,b,c) a * c / b -// #define END_IND(a,b,c) (a + 1) * c / b + ((a + 1) * c % b > 0)?1:0 - -static void THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)( - real *input_p, - real *output_p, - long nslices, - long iwidth, - long iheight, - long owidth, - long oheight, - long stridew, - long strideh, - long strided) -{ - long k; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - /* loop over output */ - long i, j; - for(i = 0; i < oheight; i++) - { - int y_start = START_IND(i, oheight, iheight); - int y_end = END_IND(i, oheight, iheight); - int kH = y_end-y_start; - - for(j = 0; j < owidth; j++) - { - - int x_start = START_IND(j, owidth, iwidth); - int x_end = END_IND(j, owidth, iwidth); - int kW = x_end-x_start; - - /* local pointers */ - real *ip = input_p + k*strided + y_start*strideh + x_start*stridew; - real *op = output_p + k*owidth*oheight + i*owidth + j; - - /* compute local average: */ - real sum = 0; - int x,y; - for(y = 0; y < kH; y++) - { - for(x = 0; x < kW; x++) - { - real val = *(ip + y*strideh + x*stridew); - sum += val; - } - } - - /* set output to local average */ - *op = sum / kW / kH; - } - } - } -} - -void THNN_(SpatialAdaptiveAveragePooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int owidth, - int oheight) -{ - int dimw = 2; - int dimh = 1; - long nbatch = 1; - long nslices; - long iheight; - long iwidth; - - long istride_d; - long istride_h; - long istride_w; - long istride_b; - - real *input_data; - real *output_data; - - - THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input, - "3D or 4D (batch mode) tensor expected for input, but got: %s"); - - if (input->nDimension == 4) - { - istride_b = input->stride[0]; - nbatch = input->size[0]; - dimw++; - dimh++; - } - - /* sizes */ - nslices = input->size[dimh-1]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - /* strides */ - istride_d = input->stride[dimh-1]; - istride_h = input->stride[dimh]; - istride_w = input->stride[dimw]; - - /* resize output */ - if (input->nDimension == 3) - { - THTensor_(resize3d)(output, nslices, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - - THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(input_data, output_data, - nslices, - iwidth, iheight, - owidth, oheight, - istride_w,istride_h, - istride_d); - } - else - { - long p; - - THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) - { - THNN_(SpatialAdaptiveAveragePooling_updateOutput_frame)(input_data+p*istride_b, output_data+p*nslices*owidth*oheight, - nslices, - iwidth, iheight, - owidth, oheight, - istride_w,istride_h, - istride_d); - } - } -} - 
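Both adaptive pooling files derive each output cell's window from the START_IND/END_IND macros, i.e. [floor(i*in/out), ceil((i+1)*in/out)), so the windows always tile the input exactly regardless of the requested output size. A 1-D sketch of the same mapping (hypothetical adaptive_avg_pool1d):

#include <math.h>

static int start_ind(int a, int b, int c) { return (int)floor((double)(a * c) / b); }
static int end_ind(int a, int b, int c)   { return (int)ceil((double)((a + 1) * c) / b); }

/* 1-D adaptive average pooling: len_in samples down to len_out averages. */
static void adaptive_avg_pool1d(const double *in, int len_in,
                                double *out, int len_out)
{
    for (int i = 0; i < len_out; i++) {
        int s = start_ind(i, len_out, len_in);
        int e = end_ind(i, len_out, len_in);   /* window size e - s varies */
        double sum = 0;
        for (int k = s; k < e; k++) sum += in[k];
        out[i] = sum / (e - s);
    }
}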
-static void THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)( - real *gradInput_p, - real *gradOutput_p, - long nslices, - long iwidth, - long iheight, - long owidth, - long oheight) -{ - long k; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - real *gradInput_p_k = gradInput_p + k*iwidth*iheight; - real *gradOutput_p_k = gradOutput_p + k*owidth*oheight; - - /* calculate average */ - long i, j; - for(i = 0; i < oheight; i++) - { - int y_start = START_IND(i, oheight, iheight); - int y_end = END_IND(i, oheight, iheight); - int kH = y_end-y_start; - - for(j = 0; j < owidth; j++) - { - - int x_start = START_IND(j, owidth, iwidth); - int x_end = END_IND(j, owidth, iwidth); - int kW = x_end-x_start; - - int x,y; - for(y = y_start; y < y_end; y++) - { - for(x = x_start; x < x_end; x++) - { - /* update gradient */ - gradInput_p_k[y*iwidth + x] += gradOutput_p_k[i*owidth + j] / kW / kH; - } - } - } - } - } -} - -void THNN_(SpatialAdaptiveAveragePooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput) -{ - int dimw = 2; - int dimh = 1; - long nbatch = 1; - int nslices; - int iheight; - int iwidth; - int oheight; - int owidth; - real *gradInput_data; - real *gradOutput_data; - - /* get contiguous gradOutput */ - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - if (input->nDimension == 4) { - nbatch = input->size[0]; - dimw++; - dimh++; - } - - /* sizes */ - nslices = input->size[dimh-1]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - oheight = gradOutput->size[dimh]; - owidth = gradOutput->size[dimw]; - - /* get raw pointers */ - gradInput_data = THTensor_(data)(gradInput); - gradOutput_data = THTensor_(data)(gradOutput); - - /* backprop */ - if (input->nDimension == 3) - { - THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data, gradOutput_data, - nslices, - iwidth, iheight, - owidth, oheight); - } - else - { - long p; -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) - { - THNN_(SpatialAdaptiveAveragePooling_updateGradInput_frame)(gradInput_data+p*nslices*iwidth*iheight, gradOutput_data+p*nslices*owidth*oheight, - nslices, - iwidth, iheight, - owidth, oheight); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); -} - -#endif - -#undef START_IND -#undef END_IND \ No newline at end of file diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialAdaptiveMaxPooling.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialAdaptiveMaxPooling.c deleted file mode 100644 index fff716e676..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialAdaptiveMaxPooling.c +++ /dev/null @@ -1,274 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialAdaptiveMaxPooling.c" -#else - -static void THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)( - real *input_p, - real *output_p, - THIndex_t *indx_p, - THIndex_t *indy_p, - long nslices, - long iwidth, - long iheight, - long owidth, - long oheight, - long stridew, - long strideh, - long strided) -{ - long k; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - /* loop over output */ - long i, j; - for(i = 0; i < oheight; i++) - { - int y_start = (int)floor((float)i / oheight * iheight); - int y_end = (int)ceil((float)(i + 1) / oheight * iheight); - int kH = y_end-y_start; - - for(j = 0; j < owidth; j++) - { - - int x_start = (int)floor((float)j / owidth * iwidth); - int x_end = 
(int)ceil((float)(j + 1) / owidth * iwidth); - int kW = x_end-x_start; - - /* local pointers */ - real *ip = input_p + k*strided + y_start*strideh + x_start*stridew; - real *op = output_p + k*owidth*oheight + i*owidth + j; - THIndex_t *indyp = indy_p + k*owidth*oheight + i*owidth + j; - THIndex_t *indxp = indx_p + k*owidth*oheight + i*owidth + j; - - /* compute local max: */ - long maxindex = -1; - real maxval = -FLT_MAX; - long tcntr = 0; - int x,y; - for(y = 0; y < kH; y++) - { - for(x = 0; x < kW; x++) - { - real val = *(ip + y*strideh + x*stridew); - if (val > maxval) - { - maxval = val; - maxindex = tcntr; - } - tcntr++; - } - } - - /* set output to local max */ - *op = maxval; - - /* store location of max (x,y) */ - *indyp = (maxindex / kW) + TH_INDEX_BASE; - *indxp = (maxindex % kW) + TH_INDEX_BASE; - } - } - } -} - -void THNN_(SpatialAdaptiveMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int owidth, - int oheight) -{ - int dimw = 2; - int dimh = 1; - long nbatch = 1; - long nslices; - long iheight; - long iwidth; - - long istride_d; - long istride_h; - long istride_w; - long istride_b; - - real *input_data; - real *output_data; - THIndex_t *indices_data; - - - THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input, - "3D or 4D (batch mode) tensor expected for input, but got: %s"); - - if (input->nDimension == 4) - { - istride_b = input->stride[0]; - nbatch = input->size[0]; - dimw++; - dimh++; - } - - /* sizes */ - nslices = input->size[dimh-1]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - /* strides */ - istride_d = input->stride[dimh-1]; - istride_h = input->stride[dimh]; - istride_w = input->stride[dimw]; - - /* resize output */ - if (input->nDimension == 3) - { - THTensor_(resize3d)(output, nslices, oheight, owidth); - /* indices will contain i,j locations for each output point */ - THIndexTensor_(resize4d)(indices, 2, nslices, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - - THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(input_data, output_data, - indices_data+nslices*owidth*oheight, indices_data, - nslices, - iwidth, iheight, - owidth, oheight, - istride_w,istride_h, - istride_d); - } - else - { - long p; - - THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth); - /* indices will contain i,j locations for each output point */ - THIndexTensor_(resize5d)(indices, 2, nbatch, nslices, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) - { - THNN_(SpatialAdaptiveMaxPooling_updateOutput_frame)(input_data+p*istride_b, output_data+p*nslices*owidth*oheight, - indices_data+(p+nbatch)*nslices*owidth*oheight, indices_data+p*nslices*owidth*oheight, - nslices, - iwidth, iheight, - owidth, oheight, - istride_w,istride_h, - istride_d); - } - } -} - -static void THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)( - real *gradInput_p, - real *gradOutput_p, - THIndex_t *indx_p, - THIndex_t *indy_p, - long nslices, - long iwidth, - long iheight, - long owidth, - long oheight) -{ - long k; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - real *gradInput_p_k = gradInput_p + k*iwidth*iheight; - real *gradOutput_p_k = gradOutput_p + k*owidth*oheight; - THIndex_t *indx_p_k = indx_p + 
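The max-pooling variant additionally records, for each output cell, where the maximum sat inside its window, so the backward pass can route every gradient to exactly one input element, which is what the gradInput frame does with the stored indices. A 1-D sketch of the pair, assuming 0-based indices throughout (THNN shifts by TH_INDEX_BASE for Lua):

#include <float.h>
#include <math.h>

/* Forward: record absolute argmax positions alongside the max values. */
static void adaptive_max_pool1d(const double *in, int len_in,
                                double *out, int *idx, int len_out)
{
    for (int i = 0; i < len_out; i++) {
        int s = (int)floor((double)i * len_in / len_out);
        int e = (int)ceil((double)(i + 1) * len_in / len_out);
        double best = -DBL_MAX; int arg = s;
        for (int k = s; k < e; k++)
            if (in[k] > best) { best = in[k]; arg = k; }
        out[i] = best;
        idx[i] = arg;
    }
}

/* Backward: scatter each output gradient to the recorded argmax slot. */
static void adaptive_max_pool1d_grad(double *gin, int len_in,
                                     const double *gout, const int *idx,
                                     int len_out)
{
    for (int k = 0; k < len_in; k++) gin[k] = 0;
    for (int i = 0; i < len_out; i++) gin[idx[i]] += gout[i];
}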
k*owidth*oheight; - THIndex_t *indy_p_k = indy_p + k*owidth*oheight; - - /* calculate max points */ - long i, j; - for(i = 0; i < oheight; i++) - { - int y_start = (int)floor((float) i / oheight * iheight); - for(j = 0; j < owidth; j++) - { - int x_start = (int)floor((float) j / owidth * iwidth); - /* retrieve position of max */ - long maxi = indy_p_k[i*owidth + j] - TH_INDEX_BASE + y_start; - long maxj = indx_p_k[i*owidth + j] - TH_INDEX_BASE + x_start; - - /* update gradient */ - gradInput_p_k[maxi*iwidth + maxj] += gradOutput_p_k[i*owidth + j]; - } - } - } -} - -void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices) -{ - int dimw = 2; - int dimh = 1; - long nbatch = 1; - int nslices; - int iheight; - int iwidth; - int oheight; - int owidth; - real *gradInput_data; - real *gradOutput_data; - THIndex_t *indices_data; - - /* get contiguous gradOutput */ - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - if (input->nDimension == 4) { - nbatch = input->size[0]; - dimw++; - dimh++; - } - - /* sizes */ - nslices = input->size[dimh-1]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - oheight = gradOutput->size[dimh]; - owidth = gradOutput->size[dimw]; - - /* get raw pointers */ - gradInput_data = THTensor_(data)(gradInput); - gradOutput_data = THTensor_(data)(gradOutput); - indices_data = THIndexTensor_(data)(indices); - - /* backprop */ - if (input->nDimension == 3) - { - THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data, gradOutput_data, - indices_data+nslices*owidth*oheight, indices_data, - nslices, - iwidth, iheight, - owidth, oheight); - } - else - { - long p; -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) - { - THNN_(SpatialAdaptiveMaxPooling_updateGradInput_frame)(gradInput_data+p*nslices*iwidth*iheight, gradOutput_data+p*nslices*owidth*oheight, - indices_data+(p+nbatch)*nslices*owidth*oheight, indices_data+p*nslices*owidth*oheight, - nslices, - iwidth, iheight, - owidth, oheight); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialAveragePooling.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialAveragePooling.c deleted file mode 100644 index c063502e7d..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialAveragePooling.c +++ /dev/null @@ -1,329 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialAveragePooling.c" -#else - -static inline void THNN_(SpatialAveragePooling_shapeCheck)( - THTensor *input, THTensor *gradOutput, - int kH, int kW, int dH, int dW, int padH, int padW, - bool ceil_mode) { - - THArgCheck(kW > 0 && kH > 0, 5, - "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW); - THArgCheck(dW > 0 && dH > 0, 8, - "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); - - int ndim = input->nDimension; - int dimf = 0; - int dimh = 1; - int dimw = 2; - - if (ndim == 4) { - dimf++; - dimh++; - dimw++; - } - - THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input, - "3D or 4D input tensor expected but got: %s"); - - THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, - "pad should be smaller than half of kernel size, but got " - "padW = %d, padH = %d, kW = %d, kH = %d", - padW, padH, kW, kH); - - long nInputPlane = input->size[dimh-1]; - long inputHeight = input->size[dimh]; - long inputWidth = 
input->size[dimw]; - long outputHeight, outputWidth; - long nOutputPlane = nInputPlane; - - if(ceil_mode) - { - outputHeight = (long)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1; - outputWidth = (long)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1; - } - else - { - outputHeight = (long)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1; - outputWidth = (long)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1; - } - - if (padW || padH) - { - // ensure that the last pooling starts inside the image - // needed to avoid problems in ceil mode - if ((outputHeight - 1)*dH >= inputHeight + padH) - --outputHeight; - if ((outputWidth - 1)*dW >= inputWidth + padW) - --outputWidth; - } - - if (outputWidth < 1 || outputHeight < 1) - THError("Given input size: (%dx%dx%d). " - "Calculated output size: (%dx%dx%d). Output size is too small", - nInputPlane,inputHeight,inputWidth,nInputPlane,outputHeight,outputWidth); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth); - } -} - -void THNN_(SpatialAveragePooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - bool ceil_mode, - bool count_include_pad) -{ - real *output_data; - real *input_data; - - int dimw = 2; - int dimh = 1; - int dimc = 0; - long nbatch = 1; - - long inputWidth; - long inputHeight; - long outputWidth; - long outputHeight; - long nInputPlane; // number of channels (or colors) - - long k; - - THNN_(SpatialAveragePooling_shapeCheck) - (input, NULL, kH, kW, dH, dW, padH, padW, ceil_mode); - - if (input->nDimension == 4) { - nbatch = input->size[0]; - dimw++; - dimh++; - dimc++; - } - - inputWidth = input->size[dimw]; - inputHeight = input->size[dimh]; - nInputPlane = input->size[dimc]; - - if(ceil_mode) - { - outputWidth = (long)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1; - outputHeight = (long)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1; - } - else - { - outputWidth = (long)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1; - outputHeight = (long)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1; - } - if (padW || padH) - { - // ensure that the last pooling starts inside the image - // needed to avoid problems in ceil mode - if ((outputHeight - 1)*dH >= inputHeight + padH) - --outputHeight; - if ((outputWidth - 1)*dW >= inputWidth + padW) - --outputWidth; - } - - if (input->nDimension == 3) - THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth); - else - THTensor_(resize4d)(output, input->size[0], nInputPlane, outputHeight, outputWidth); - - input = THTensor_(newContiguous)(input); - THArgCheck(THTensor_(isContiguous)(output), 3, "output must be contiguous"); - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - -#pragma omp parallel for private(k) - for(k = 0; k < nInputPlane; k++) - { - long p; - for(p = 0; p < nbatch; p++) - { - long xx, yy; - /* For all output pixels... */ - real *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight; - real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight; - long i; - for(i = 0; i < outputWidth*outputHeight; i++) - ptr_output[i] = 0; - - for(yy = 0; yy < outputHeight; yy++) - { - for(xx = 0; xx < outputWidth; xx++) - { - /* Compute the mean of the input image... 
*/ - long hstart = yy * dH - padH; - long wstart = xx * dW - padW; - long hend = fminf(hstart + kH, inputHeight + padH); - long wend = fminf(wstart + kW, inputWidth + padW); - int pool_size = (hend - hstart) * (wend - wstart); - hstart = fmaxf(hstart, 0); - wstart = fmaxf(wstart, 0); - hend = fminf(hend, inputHeight); - wend = fminf(wend, inputWidth); - - real sum = 0; - - int divide_factor; - if(count_include_pad) - divide_factor = pool_size; - else - divide_factor = (hend - hstart) * (wend - wstart); - - long kx, ky; - - for(ky = hstart; ky < hend; ky++) - { - for(kx = wstart; kx < wend; kx++) - sum += ptr_input[ky*inputWidth + kx]; - } - /* Update output */ - *ptr_output++ += sum/divide_factor; - } - } - } - } - THTensor_(free)(input); -} - -void THNN_(SpatialAveragePooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - bool ceil_mode, - bool count_include_pad) -{ - int dimw = 2; - int dimh = 1; - int dimc = 0; - long nbatch = 1; - long ndim = 3; - - long inputWidth; - long inputHeight; - long outputWidth; - long outputHeight; - long nInputPlane; // number of channels (or colors) - - real *gradOutput_data; - real *input_data, *gradInput_data; - - long k; - - THNN_(SpatialAveragePooling_shapeCheck) - (input, gradOutput, kH, kW, dH, dW, padH, padW, ceil_mode); - - - if (input->nDimension == 4) { - nbatch = input->size[0]; - dimw++; - dimh++; - dimc++; - ndim = 4; - } - - inputWidth = input->size[dimw]; - inputHeight = input->size[dimh]; - nInputPlane = input->size[dimc]; - - if(ceil_mode) - { - outputWidth = (long)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1; - outputHeight = (long)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1; - } - else - { - outputWidth = (long)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1; - outputHeight = (long)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1; - } - if (padW || padH) - { - // ensure that the last pooling starts inside the image - // needed to avoid problems in ceil mode - if ((outputHeight - 1)*dH >= inputHeight + padH) - --outputHeight; - if ((outputWidth - 1)*dW >= inputWidth + padW) - --outputWidth; - } - - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth); - - THTensor_(resizeAs)(gradInput, input); - - gradOutput = THTensor_(newContiguous)(gradOutput); - THArgCheck(THTensor_(isContiguous)(gradInput), 4, "gradInput must be contiguous"); - - gradInput_data = THTensor_(data)(gradInput); - gradOutput_data = THTensor_(data)(gradOutput); - -#pragma omp parallel for private(k) - for(k = 0; k < nInputPlane; k++) - { - long p; - for(p = 0; p < nbatch; p++) - { - real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight; - long xx, yy; - - real* ptr_gi = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight; - real *ptr_gradInput = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight; - - long i; - for(i=0; i= 0 && cur_target < n_classes); - - real cur_weight = weights ? 
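The window bookkeeping above is the subtle part of average pooling with padding: pool_size is measured over the padded window, the window is then clipped to the image, and divide_factor is either the padded size (count_include_pad) or the clipped size. A 1-D sketch of that divisor logic (hypothetical avg_window; start may be negative, but by construction never below -pad):

/* One 1-D average-pooling window, mirroring the 2-D divisor logic above. */
static double avg_window(const double *in, int len,
                         int start /* == x*dW - pad */, int k,
                         int pad, int count_include_pad)
{
    int end = start + k;
    if (end > len + pad) end = len + pad;  /* clip to the padded image */
    int pool_size = end - start;           /* divisor including padding */
    int s = start < 0 ? 0 : start;         /* clip to the real image */
    int e = end > len ? len : end;
    double sum = 0;
    for (int i = s; i < e; i++) sum += in[i];
    return sum / (count_include_pad ? pool_size : (e - s));
}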
weights_data[cur_target] : 1.0f; - total_weight_acc += cur_weight; - output_acc -= input_data[b * sample_size + cur_target * map_size + elem] * cur_weight; - } - } - *total_weight_data = total_weight_acc; - *output_data = output_acc; - - if (sizeAverage && *total_weight_data) - *output_data /= *total_weight_data; - - THTensor_(free)(input); - THIndexTensor_(free)(target); - if (weights) - THTensor_(free)(weights); -} - -void THNN_(SpatialClassNLLCriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THIndexTensor *target, - THTensor *gradInput, - bool sizeAverage, - THTensor *weights, - THTensor *total_weight) -{ - INITIAL_CHECK; - THArgCheck(THTensor_(isContiguous)(gradInput), 4, - "gradInput must be contiguous"); - - real *total_weight_data = THTensor_(data)(total_weight); - if (*total_weight_data <= 0) - return; - - target = THIndexTensor_(newContiguous)(target); - weights = weights ? THTensor_(newContiguous)(weights) : NULL; - - THIndex_t *target_data = THIndexTensor_(data)(target); - real *weights_data = weights ? THTensor_(data)(weights) : NULL; - real *gradInput_data = THTensor_(data)(gradInput); - - long batch_size = THTensor_(size)(input, 0); - long n_classes = THTensor_(size)(input, 1); - long map_size = THTensor_(size)(input, 2) * THTensor_(size)(input, 3); - long sample_size = map_size * n_classes; - - real normalize = sizeAverage ? *total_weight_data : 1.0f; - - int b; - #pragma omp parallel for - for (b = 0; b < batch_size; b++) { - int elem; - for (elem = 0; elem < map_size; elem++) { - int cur_target = target_data[b * map_size + elem] - TH_INDEX_BASE; - THAssert(cur_target >= 0 && cur_target < n_classes); - - gradInput_data[b * sample_size + cur_target * map_size + elem] = - -(weights ? weights_data[cur_target] : 1.0f) / normalize; - } - } - - THIndexTensor_(free)(target); - if (weights) - THTensor_(free)(weights); -} - -#undef INITIAL_CHECK - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialConvolutionLocal.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialConvolutionLocal.c deleted file mode 100644 index 6db5a5db98..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialConvolutionLocal.c +++ /dev/null @@ -1,367 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialConvolutionLocal.c" -#else - -static inline void THNN_(SpatialConvolutionLocal_shapeCheck)( - THTensor *input, THTensor *gradOutput, - THTensor *weight, THTensor *bias, - int kH, int kW, int dH, - int dW, int padH, int padW, - long inputHeight, long inputWidth, - long outputHeight, long outputWidth) { - - THArgCheck(kW > 0 && kH > 0, 9, - "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW); - THArgCheck(dW > 0 && dH > 0, 11, - "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); - - int ndim = input->nDimension; - int dimf = 0; - int dimh = 1; - int dimw = 2; - - if (ndim == 4) { - dimf++; - dimh++; - dimw++; - } - - THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input, - "3D or 4D input tensor expected but got: %s"); - - long nInputPlane = weight->size[2] / (kH * kW); - long nOutputPlane = weight->size[1]; - - if (bias != NULL) { - THNN_CHECK_DIM_SIZE(bias, 3, 0, nOutputPlane); - THNN_CHECK_DIM_SIZE(bias, 3, 1, outputHeight); - THNN_CHECK_DIM_SIZE(bias, 3, 2, outputWidth); - } - - THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, 
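The spatial NLL criterion above reads input as batch x classes x h x w log-probabilities and target as 1-based class ids per pixel; the loss picks out -input at the target class (optionally class-weighted), and the gradient writes -weight/normalize into exactly that slot. A sketch for a single pixel (hypothetical nll_pixel, 0-based target):

/* Loss and gradient for one spatial position. logp and grad point at this
 * pixel's class-0 slot; classes are strided by map_size, as in the code above. */
static double nll_pixel(const double *logp, double *grad, long map_size,
                        long target, const double *class_w, double normalize)
{
    double cw = class_w ? class_w[target] : 1.0;
    grad[target * map_size] = -cw / normalize;  /* the only slot written */
    return -logp[target * map_size] * cw;
}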
dimw, outputWidth); - } -} - -static THTensor* THNN_(view_weight_local)(THTensor *_weight) -{ - THTensor *weight = THTensor_(newContiguous)(_weight); - THArgCheck(weight->nDimension == 3 || weight->nDimension == 6, 4, - "weight tensor should be 3D or 6D - got %dD", weight->nDimension); - if (weight->nDimension == 6) { - long s1 = weight->size[0] * weight->size[1]; - long s2 = weight->size[2]; - long s3 = weight->size[3] * weight->size[4] * weight->size[5]; - THTensor *old_weight = weight; - weight = THTensor_(newWithStorage3d)(weight->storage, - weight->storageOffset, - s1, -1, s2, -1, s3, -1); - THTensor_(free)(old_weight); - } - return weight; -} - -static void THNN_(SpatialConvolutionLocal_updateOutput_frame) - ( - THTensor *input, THTensor *output, - THTensor *weight, THTensor *bias, THTensor *finput, - int kW, int kH, int dW, int dH, int padW, int padH, - long nInputPlane, long inputWidth, long inputHeight, - long nOutputPlane, long outputWidth, long outputHeight) -{ - long i; - THTensor *output3d, *finput3d; - - THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - outputWidth, outputHeight); - - THTensor_(copy)(output, bias); - - output3d = THTensor_(newWithStorage3d) - (output->storage, output->storageOffset, - outputHeight * outputWidth, 1, - nOutputPlane, outputHeight * outputWidth, - 1, nOutputPlane * outputHeight * outputWidth); - - finput3d = THTensor_(newWithStorage3d) - (finput->storage, finput->storageOffset, - outputHeight * outputWidth, 1, - kW * kH * nInputPlane, outputHeight * outputWidth, - 1, kW * kH * nInputPlane * outputHeight * outputWidth); - - // weight: oH*oW x nOutputPlane x nInputPlane*kH*kW - // finput3d: oH*oW x nInputPlane*kH*kW x 1 - THTensor_(baddbmm)(output3d, 1.0, output3d, 1.0, weight, finput3d); - // output3d: oH*oW x nOutputPlane x 1 - - THTensor_(free)(output3d); - THTensor_(free)(finput3d); -} - -void THNN_(SpatialConvolutionLocal_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH, - long inputWidth, long inputHeight, - long outputWidth, long outputHeight) -{ - weight = THNN_(view_weight_local)(weight); - - THNN_(SpatialConvolutionLocal_shapeCheck) - (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW, - inputHeight, inputWidth, outputHeight, outputWidth); - - input = THTensor_(newContiguous)(input); - - long nInputPlane = THTensor_(size)(weight, 2)/ (kW * kH); - long nOutputPlane = THTensor_(size)(weight, 1); - - if(input->nDimension == 3) - { - THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth); - THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth); - - THNN_(SpatialConvolutionLocal_updateOutput_frame) - (input, output, weight, bias, finput, - kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - nOutputPlane, outputWidth, outputHeight); - } - else - { - long T = input->size[0]; - long t; - - THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth); - THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth); - -#pragma omp parallel for private(t) - for(t = 0; t < T; t++) - { - THTensor *input_t = THTensor_(newSelect)(input, 0, t); - THTensor *output_t = THTensor_(newSelect)(output, 0, t); - THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); - - THNN_(SpatialConvolutionLocal_updateOutput_frame) - (input_t, output_t, weight, bias, finput_t, - 
kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - nOutputPlane, outputWidth, outputHeight); - - THTensor_(free)(input_t); - THTensor_(free)(output_t); - THTensor_(free)(finput_t); - } - } - - THTensor_(free)(input); - THTensor_(free)(weight); -} - - -static void THNN_(SpatialConvolutionLocal_updateGradInput_frame) - (THTensor *gradInput, THTensor *gradOutput, - THTensor *weight, THTensor *fgradInput, - int kW, int kH, int dW, int dH, int padW, int padH, - long nInputPlane, long inputWidth, long inputHeight, - long nOutputPlane, long outputWidth, long outputHeight) -{ - THTensor *gradOutput3d, *fgradInput3d; - gradOutput3d = THTensor_(newWithStorage3d)(gradOutput->storage, gradOutput->storageOffset, - outputHeight*outputWidth, 1, - nOutputPlane, outputHeight*outputWidth, - 1, nOutputPlane*outputHeight*outputWidth); - fgradInput3d = THTensor_(newWithStorage3d)(fgradInput->storage, fgradInput->storageOffset, - outputHeight*outputWidth, 1, - kW*kH*nInputPlane, outputHeight*outputWidth, - 1, kW*kH*nInputPlane*outputHeight*outputWidth); - // weight: oH*oW x nInputPlane*kH*kW x nOutputPlane - // gradOutput3d: oH*oW x nOutputPlane x 1 - THTensor_(baddbmm)(fgradInput3d, 0.0, fgradInput3d, 1.0, weight, gradOutput3d); - // fgradInput3d: oH*oW x nInputPlane*kH*kW x 1 - - THTensor_(free)(gradOutput3d); - THTensor_(free)(fgradInput3d); - - THTensor_(zero)(gradInput); - - THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - outputWidth, outputHeight); - -} - -void THNN_(SpatialConvolutionLocal_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH, - long inputWidth, long inputHeight, - long outputWidth, long outputHeight) -{ - weight = THNN_(view_weight_local)(weight); - - THNN_(SpatialConvolutionLocal_shapeCheck) - (input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW, - inputHeight, inputWidth, outputHeight, outputWidth); - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - long nInputPlane = THTensor_(size)(weight,2)/(kW*kH); - long nOutputPlane = THTensor_(size)(weight,1); - - THTensor_(resizeAs)(gradInput, input); - THTensor_(resizeAs)(fgradInput, finput); - - THTensor *tweight = THTensor_(new)(); - THTensor_(transpose)(tweight, weight, 1, 2); - - if(input->nDimension == 3) - { - THNN_(SpatialConvolutionLocal_updateGradInput_frame) - (gradInput, gradOutput, tweight, - fgradInput, kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - nOutputPlane, outputWidth, outputHeight); - } - else - { - long T = input->size[0]; - long t; - -#pragma omp parallel for private(t) - for(t = 0; t < T; t++) - { - THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t); - THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); - THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t); - - THNN_(SpatialConvolutionLocal_updateGradInput_frame) - (gradInput_t, gradOutput_t, tweight, fgradInput_t, - kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - nOutputPlane, outputWidth, outputHeight); - - THTensor_(free)(gradInput_t); - THTensor_(free)(gradOutput_t); - THTensor_(free)(fgradInput_t); - } - } - - THTensor_(free)(tweight); - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(weight); -} - -static void 
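SpatialConvolutionLocal differs from an ordinary convolution only in that the weight carries a separate filter per output location (viewed as oH*oW x nOutputPlane x nInputPlane*kH*kW), so after unfolded_copy the forward above reduces to one small matmul per output pixel via baddbmm. A dense sketch of that per-location product (hypothetical local_conv_matmul):

/* out[p][o] += sum_k W[p][o][k] * cols[p][k], where p indexes output pixels
 * and cols holds the unfolded (im2col'd) input, one patch per pixel. */
static void local_conv_matmul(const double *W, const double *cols,
                              double *out, long npix, long nout, long kdim)
{
    for (long p = 0; p < npix; p++)
        for (long o = 0; o < nout; o++) {
            double acc = out[p * nout + o];       /* bias preloaded here */
            for (long k = 0; k < kdim; k++)
                acc += W[(p * nout + o) * kdim + k] * cols[p * kdim + k];
            out[p * nout + o] = acc;
        }
}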
THNN_(SpatialConvolutionLocal_accGradParameters_frame) - (THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, - THTensor *finput, real scale, - int kW, int kH, int dW, int dH, int padW, int padH, - long nInputPlane, long inputWidth, long inputHeight, - long nOutputPlane, long outputWidth, long outputHeight) -{ - - THTensor *gradOutput3d, *finput3d; - gradOutput3d = THTensor_(newWithStorage3d)(gradOutput->storage, gradOutput->storageOffset, - outputHeight*outputWidth, 1, - nOutputPlane, outputHeight*outputWidth, - 1, nOutputPlane*outputHeight*outputWidth); - finput3d = THTensor_(newWithStorage3d)(finput->storage, finput->storageOffset, - outputHeight*outputWidth, 1, - 1, kW*kH*nInputPlane*outputHeight*outputWidth, - kW*kH*nInputPlane, outputHeight*outputWidth); - // gradOutput3d: oH*oW x nOutputPlane x 1 - // finput3d: oH*oW x 1 x kW*kH*nInputPlane - THTensor_(baddbmm)(gradWeight, 1.0, gradWeight, scale, gradOutput3d, finput3d); - // gradWeight: oH*oW x nOutputPlane x kW*kH*nInputPlane - - THTensor_(cadd)(gradBias, gradBias, scale, gradOutput); - - THTensor_(free)(gradOutput3d); - THTensor_(free)(finput3d); -} - -void THNN_(SpatialConvolutionLocal_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH, - long inputWidth, long inputHeight, - long outputWidth, long outputHeight, - accreal scale_) -{ - THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous"); - THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous"); - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - gradWeight = THNN_(view_weight_local)(gradWeight); - - THNN_(SpatialConvolutionLocal_shapeCheck) - (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, - inputHeight, inputWidth, outputHeight, outputWidth); - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - - long nInputPlane = THTensor_(size)(gradWeight,2)/(kW*kH); - long nOutputPlane = THTensor_(size)(gradWeight,1); - - if(input->nDimension == 3) - { - THNN_(SpatialConvolutionLocal_accGradParameters_frame) - (gradOutput, gradWeight, gradBias, finput, scale, - kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - nOutputPlane, outputWidth, outputHeight); - } - else - { - long T = input->size[0]; - long t; - - for(t = 0; t < T; t++) - { - THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); - THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); - - THNN_(SpatialConvolutionLocal_accGradParameters_frame) - (gradOutput_t, gradWeight, gradBias, finput_t, scale, - kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - nOutputPlane, outputWidth, outputHeight); - - THTensor_(free)(gradOutput_t); - THTensor_(free)(finput_t); - } - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(gradWeight); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialConvolutionMM.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialConvolutionMM.c deleted file mode 100644 index 28fea517c6..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialConvolutionMM.c +++ /dev/null @@ -1,377 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialConvolutionMM.c" -#else - -static inline void THNN_(SpatialConvolutionMM_shapeCheck)( - THTensor *input, THTensor *gradOutput, - THTensor *weight, 
THTensor *bias, - int kH, int kW, int dH, int dW, int padH, int padW) { - - THArgCheck(kW > 0 && kH > 0, 9, - "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW); - THArgCheck(dW > 0 && dH > 0, 11, - "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); - THNN_ARGCHECK(weight->nDimension == 2 || weight->nDimension == 4, 5, weight, - "2D or 4D weight tensor expected, but got: %s"); - - if (bias != NULL) { - THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]); - } - - int ndim = input->nDimension; - int dimf = 0; - int dimh = 1; - int dimw = 2; - - if (ndim == 4) { - dimf++; - dimh++; - dimw++; - } - - THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input, - "3D or 4D input tensor expected but got: %s"); - - long nInputPlane = weight->size[1] / (kH * kW); - long inputHeight = input->size[dimh]; - long inputWidth = input->size[dimw]; - long nOutputPlane = weight->size[0]; - long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; - long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; - - if (outputWidth < 1 || outputHeight < 1) - THError("Given input size: (%d x %d x %d). " - "Calculated output size: (%d x %d x %d). Output size is too small", - nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); - - THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth); - } -} - -static THTensor* THNN_(view_weight_MM2d)(THTensor *weight) { - weight = THTensor_(newContiguous)(weight); - if (weight->nDimension == 4) { - long s1 = weight->size[0]; - long s2 = weight->size[1] * weight->size[2] * weight->size[3]; - THTensor *old_weight = weight; - weight = THTensor_(newWithStorage2d)(weight->storage, weight->storageOffset, - s1, -1, s2, -1); - THTensor_(free)(old_weight); - } - return weight; -} - -static void THNN_(SpatialConvolutionMM_updateOutput_frame)( - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - long nInputPlane, - long inputWidth, - long inputHeight, - long nOutputPlane, - long outputWidth, - long outputHeight) -{ - long i; - THTensor *output2d; - - THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - outputWidth, outputHeight); - - output2d = THTensor_(newWithStorage2d)(output->storage, output->storageOffset, - nOutputPlane, -1, - outputHeight*outputWidth, -1); - if (bias) { - for(i = 0; i < nOutputPlane; i++) - THVector_(fill) - (output->storage->data + output->storageOffset + output->stride[0] * i, - THTensor_(get1d)(bias, i), outputHeight*outputWidth); - } else { - THTensor_(zero)(output); - } - - THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput); - - THTensor_(free)(output2d); -} - -void THNN_(SpatialConvolutionMM_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, - THTensor *fgradInput, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH) -{ - weight = THNN_(view_weight_MM2d)(weight); - - THNN_(SpatialConvolutionMM_shapeCheck) - (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW); - - input = THTensor_(newContiguous)(input); - int ndim = input->nDimension; - int dimf = 0; - int dimh = 1; - int dimw = 2; - - if (ndim == 4) { - dimf++; - dimh++; - dimw++; - } - - long 
nInputPlane = input->size[dimf]; - long inputHeight = input->size[dimh]; - long inputWidth = input->size[dimw]; - long nOutputPlane = weight->size[0]; - long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; - long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; - - if(input->nDimension == 3) - { - THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth); - THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth); - - THNN_(SpatialConvolutionMM_updateOutput_frame) - (input, output, weight, bias, finput, - kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - nOutputPlane, outputWidth, outputHeight); - } - else - { - long T = input->size[0]; - long t; - - THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth); - THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth); - -#pragma omp parallel for private(t) - for(t = 0; t < T; t++) - { - THTensor *input_t = THTensor_(newSelect)(input, 0, t); - THTensor *output_t = THTensor_(newSelect)(output, 0, t); - THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); - - THNN_(SpatialConvolutionMM_updateOutput_frame) - (input_t, output_t, weight, bias, finput_t, - kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - nOutputPlane, outputWidth, outputHeight); - - THTensor_(free)(input_t); - THTensor_(free)(output_t); - THTensor_(free)(finput_t); - } - } - - THTensor_(free)(input); - THTensor_(free)(weight); -} - -static void THNN_(SpatialConvolutionMM_updateGradInput_frame)( - THTensor *gradInput, - THTensor *gradOutput, - THTensor *weight, - THTensor *fgradInput, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH) -{ - THTensor *gradOutput2d = THTensor_(newWithStorage2d) - (gradOutput->storage, gradOutput->storageOffset, - gradOutput->size[0], -1, - gradOutput->size[1]*gradOutput->size[2], -1); - THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d); - THTensor_(free)(gradOutput2d); - - THTensor_(zero)(gradInput); - - THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, - padW, padH, - gradInput->size[0], gradInput->size[2], gradInput->size[1], - gradOutput->size[2], gradOutput->size[1]); -} - -void THNN_(SpatialConvolutionMM_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - THTensor *fgradInput, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH) -{ - weight = THNN_(view_weight_MM2d)(weight); - - THNN_(SpatialConvolutionMM_shapeCheck) - (input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW); - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - - THTensor_(resizeAs)(gradInput, input); - THTensor_(resizeAs)(fgradInput, finput); - - // depending on the BLAS library, fgradInput (result tensor) might - // be left uninitialized on zero alpha, which might lead to weird behavior - // hence, to be safe, zero it - THTensor_(zero)(fgradInput); - THTensor *tweight = THTensor_(new)(); - THTensor_(transpose)(tweight, weight, 0, 1); - - if(input->nDimension == 3) - { - THNN_(SpatialConvolutionMM_updateGradInput_frame)(gradInput, gradOutput, - tweight, fgradInput, - kW, kH, dW, dH, padW, padH); - } - else - { - long T = input->size[0]; - long t; - -#pragma omp parallel for private(t) - for(t = 0; t < T; t++) - { - THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t); - THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); - THTensor *fgradInput_t = 
THTensor_(newSelect)(fgradInput, 0, t); - - THNN_(SpatialConvolutionMM_updateGradInput_frame)(gradInput_t, gradOutput_t, - tweight, fgradInput_t, - kW, kH, dW, dH, padW, padH); - - THTensor_(free)(gradInput_t); - THTensor_(free)(gradOutput_t); - THTensor_(free)(fgradInput_t); - } - } - - THTensor_(free)(tweight); - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(weight); -} - -static void THNN_(SpatialConvolutionMM_accGradParameters_frame)( - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, - real scale) -{ - long i; - THTensor *gradOutput2d = THTensor_(newWithStorage2d) - (gradOutput->storage, gradOutput->storageOffset, - gradOutput->size[0], -1, - gradOutput->size[1]*gradOutput->size[2], -1); - - THTensor *tfinput = THTensor_(new)(); - THTensor_(transpose)(tfinput, finput, 0, 1); - THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput); - THTensor_(free)(tfinput); - - if (gradBias) { - for(i = 0; i < gradBias->size[0]; i++) - { - long k; - real sum = 0; - real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0]; - for(k = 0; k < gradOutput2d->size[1]; k++) - sum += data[k]; - (gradBias->storage->data + gradBias->storageOffset)[i] += scale*sum; - } - } - - THTensor_(free)(gradOutput2d); -} - -void THNN_(SpatialConvolutionMM_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, - THTensor *fgradInput, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - accreal scale_) -{ - THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous"); - if (gradBias) - THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous"); - - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - gradWeight = THNN_(view_weight_MM2d)(gradWeight); - - THNN_(SpatialConvolutionMM_shapeCheck) - (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW); - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - - if(input->nDimension == 3) - { - THNN_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, - gradBias, finput, scale); - } - else - { - long T = input->size[0]; - long t; - - for(t = 0; t < T; t++) - { - THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); - THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); - - THNN_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, - gradBias, finput_t, scale); - - THTensor_(free)(gradOutput_t); - THTensor_(free)(finput_t); - } - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(gradWeight); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialConvolutionMap.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialConvolutionMap.c deleted file mode 100644 index 142a03551d..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialConvolutionMap.c +++ /dev/null @@ -1,277 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialConvolutionMap.c" -#else - -void THNN_(SpatialConvolutionMap_updateOutput)( - THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, - THTensor *connTable, int nInputPlane, int nOutputPlane, - int dW, int dH) -{ - THArgCheck( - weight != NULL && weight->nDimension == 3 - && connTable != NULL && connTable->size[0] == weight->size[0], 4, - "3D weight tensor expected 
(connTable:size(%d) x kH x kW)", TH_INDEX_BASE - ); - - int dimw = 2; - int dimh = 1; - int dimc = 0; - long nbatch = 1; - - THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D(batch mode) tensor expected"); - - if (input->nDimension == 4) - { - nbatch = input->size[0]; - dimc++; - dimw++; - dimh++; - } - - const long kH = weight->size[1]; - const long kW = weight->size[2]; - - THArgCheck(input->size[dimc] >= nInputPlane, 2, "invalid number of input planes"); - THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH, 2, "input image smaller than kernel size"); - - const long input_w = input->size[dimw]; - const long input_h = input->size[dimh]; - const long output_w = (input_w - kW) / dW + 1; - const long output_h = (input_h - kH) / dH + 1; - - if (input->nDimension == 3) - THTensor_(resize3d)(output, nOutputPlane, output_h, output_w); - else - THTensor_(resize4d)(output, input->size[0], nOutputPlane, output_h, output_w); - - /* contiguous */ - input = THTensor_(newContiguous)(input); - output = THTensor_(newContiguous)(output); - weight = THTensor_(newContiguous)(weight); - bias = bias ? THTensor_(newContiguous)(bias) : bias; - connTable = THTensor_(newContiguous)(connTable); - - /* get raw pointers */ - real *input_data = THTensor_(data)(input); - real *output_data = THTensor_(data)(output); - real *weight_data = THTensor_(data)(weight); - real *bias_data = THTensor_(data)(bias); - real *connTable_data = THTensor_(data)(connTable); - - long p; -#pragma omp parallel for private(p) - for (p = 0; p < nOutputPlane; p++) - { - long m; - for (m = 0; m < nbatch; m++) - { - /* add bias */ - real *ptr_output = output_data + p*output_w*output_h + m*nOutputPlane*output_w*output_h; - long j, k; - real z= bias_data[p]; - for (j = 0; j < output_h*output_w; j++) - ptr_output[j] = z; - - /* convolve all maps */ - int nweight = connTable->size[0]; - for (k = 0; k < nweight; k++) - { - /* get offsets for input/output */ - int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE; - int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE; - - if (o == p) - { - THTensor_(validXCorr2Dptr)( - output_data + o*output_w*output_h + m*nOutputPlane*output_w*output_h, - 1.0, - input_data + i*input_w*input_h + m*nInputPlane*input_w*input_h, input_h, input_w, - weight_data + k*kW*kH, - kH, kW, - dH, dW - ); - } - } - } - } - - /* clean up */ - THTensor_(free)(input); - THTensor_(free)(output); - THTensor_(free)(weight); - if (bias) THTensor_(free)(bias); - THTensor_(free)(connTable); -} - -void THNN_(SpatialConvolutionMap_updateGradInput)( - THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *bias, - THTensor *connTable, int nInputPlane, int nOutputPlane, - int dW, int dH) -{ - THArgCheck( - weight != NULL && weight->nDimension == 3 - && connTable != NULL && connTable->size[0] == weight->size[0], 5, - "3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE - ); - - /* and dims */ - int dimw = 2; - int dimh = 1; - long nbatch = 1; - if (input->nDimension == 4) - { - nbatch = input->size[0]; - dimw++; - dimh++; - } - - const long input_h = input->size[dimh]; - const long input_w = input->size[dimw]; - const long output_h = gradOutput->size[dimh]; - const long output_w = gradOutput->size[dimw]; - const long kH = weight->size[1]; - const long kW = weight->size[2]; - - /* contiguous */ - gradInput = THTensor_(newContiguous)(gradInput); - gradOutput = THTensor_(newContiguous)(gradOutput); - weight = THTensor_(newContiguous)(weight); - 
connTable = THTensor_(newContiguous)(connTable); - - /* Resize/Zero */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - /* get raw pointers */ - real *gradInput_data = THTensor_(data)(gradInput); - real *gradOutput_data = THTensor_(data)(gradOutput); - real *weight_data = THTensor_(data)(weight); - real *connTable_data = THTensor_(data)(connTable); - - long p; -#pragma omp parallel for private(p) - for (p = 0; p < nInputPlane; p++) - { - long m; - for (m = 0; m < nbatch; m++) - { - long k; - /* backward all */ - int nkernel = connTable->size[0]; - for (k = 0; k < nkernel; k++) - { - int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE; - int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE; - if (i == p) - { - /* gradient to input */ - THTensor_(fullConv2Dptr)( - gradInput_data + i*input_w*input_h + m*nInputPlane*input_w*input_h, 1.0, - gradOutput_data + o*output_w*output_h + m*nOutputPlane*output_w*output_h, output_h, output_w, - weight_data + k*kW*kH, kH, kW, dH, dW - ); - } - } - } - } - - /* clean up */ - THTensor_(free)(gradInput); - THTensor_(free)(gradOutput); - THTensor_(free)(weight); - THTensor_(free)(connTable); -} - -void THNN_(SpatialConvolutionMap_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *connTable, - int nInputPlane, - int nOutputPlane, - int dW, int dH, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - THArgCheck( - gradWeight != NULL && gradWeight->nDimension == 3 - && connTable != NULL && connTable->size[0] == gradWeight->size[0], 5, - "3D gradWeight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE - ); - - /* and dims */ - int dimw = 2; - int dimh = 1; - long nbatch = 1; - if (input->nDimension == 4) - { - nbatch = input->size[0]; - dimw++; - dimh++; - } - - const long input_h = input->size[dimh]; - const long input_w = input->size[dimw]; - const long output_h = gradOutput->size[dimh]; - const long output_w = gradOutput->size[dimw]; - const long kH = gradWeight->size[1]; - const long kW = gradWeight->size[2]; - - /* contiguous */ - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous"); - THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous"); - - /* get raw pointers */ - real *input_data = THTensor_(data)(input); - real *gradOutput_data = THTensor_(data)(gradOutput); - real *gradWeight_data = THTensor_(data)(gradWeight); - real *gradBias_data = THTensor_(data)(gradBias); - - - long k; - /* gradients wrt bias */ -#pragma omp parallel for private(k) - for (k = 0; k < nOutputPlane; k++) - { - long m; - for (m = 0; m < nbatch; m++) - { - real *ptr_gradOutput = gradOutput_data + k*output_w*output_h + m*nOutputPlane*output_w*output_h; - long l; - for (l = 0; l < output_h*output_w; l++) - gradBias_data[k] += scale*ptr_gradOutput[l]; - } - } - - /* gradients wrt weight */ - const int nkernel = connTable->size[0]; -#pragma omp parallel for private(k) - for (k = 0; k < nkernel; k++) - { - long m; - for (m = 0; m < nbatch; m++) - { - int o = (int)THTensor_(get2d)(connTable,k,1) - TH_INDEX_BASE; - int i = (int)THTensor_(get2d)(connTable,k,0) - TH_INDEX_BASE; - - /* gradient to kernel */ - THTensor_(validXCorr2DRevptr)( - gradWeight_data + k*kW*kH, - scale, - input_data + i*input_w*input_h + m*nInputPlane*input_w*input_h, input_h, input_w, - gradOutput_data + 
o*output_w*output_h + m*nOutputPlane*output_w*output_h , output_h, output_w, - dH, dW - ); - } - } - - /* clean up */ - THTensor_(free)(input); - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialDepthWiseConvolution.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialDepthWiseConvolution.c deleted file mode 100644 index efb66a3e31..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialDepthWiseConvolution.c +++ /dev/null @@ -1,528 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialDepthWiseConvolution.c" -#else - -static inline void THNN_(SpatialDepthWiseConvolution_shapeCheck)( - THTensor *input, THTensor *gradOutput, - THTensor *weight, THTensor *bias, - int kH, int kW, int dH, int dW, int padH, int padW) { - - THArgCheck(kW > 0 && kH > 0, 9, - "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW); - THArgCheck(dW > 0 && dH > 0, 11, - "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); - THNN_ARGCHECK(weight->nDimension == 4, 5, weight, - "2D or 4D weight tensor expected, but got: %s"); - - if (bias != NULL) { - THNN_CHECK_DIM_SIZE(bias, 2, 0, weight->size[0]); - THNN_CHECK_DIM_SIZE(bias, 2, 1, weight->size[1]); - } - - int ndim = input->nDimension; - int dimf = 0; - int dimh = 1; - int dimw = 2; - - if (ndim == 4) { - dimf++; - dimh++; - dimw++; - } - - THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input, - "3D or 4D input tensor expected but got: %s"); - - long nInputPlane = weight->size[1]; - long inputHeight = input->size[dimh]; - long inputWidth = input->size[dimw]; - long nOutputPlane = weight->size[0]; - long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; - long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; - - if (outputWidth < 1 || outputHeight < 1) - THError("Given input size: (%d x %d x %d). " - "Calculated output size: (%d x %d x %d). 
Output size is too small", - nInputPlane,inputHeight,inputWidth,nOutputPlane*nInputPlane,outputHeight,outputWidth); - - THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim + 1, dimf, nInputPlane); - THNN_CHECK_DIM_SIZE(gradOutput, ndim + 1, dimh, nOutputPlane); - THNN_CHECK_DIM_SIZE(gradOutput, ndim + 1, dimw, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim + 1, dimw + 1, outputWidth); - } -} - -static void THNN_(SpatialDepthWiseConvolution_updateOutput_frame)( - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - long nInputPlane, - long inputWidth, - long inputHeight, - long nOutputPlane, - long outputWidth, - long outputHeight) -{ - long i; - THTensor *output2d; - - THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH, - nInputPlane, inputWidth, inputHeight, - outputWidth, outputHeight); - - output2d = THTensor_(newWithStorage2d)(output->storage, output->storageOffset, - nOutputPlane, -1, - outputHeight*outputWidth, -1); - if (bias) { - for(i = 0; i < nOutputPlane; i++) - THVector_(fill) - (output->storage->data + output->storageOffset + output->stride[0] * i, - THTensor_(get1d)(bias, i), outputHeight*outputWidth); - } else { - THTensor_(zero)(output); - } - - THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput); - - THTensor_(free)(output2d); -} - -void THNN_(SpatialDepthWiseConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, - THTensor *fgradInput, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH) -{ - long nInputPlane = weight->nDimension == 2 ? 
weight->size[1]/(kH*kW) : weight->size[1]; - long nOutputPlane = weight->size[0]; - if (weight->nDimension == 2) { - THTensor_(resize4d)(weight, nOutputPlane, nInputPlane, kH, kW); - } - - THNN_(SpatialDepthWiseConvolution_shapeCheck) - (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW); - - THTensor *_weight = THTensor_(newTranspose)(weight, 0, 1); - weight = THTensor_(newContiguous)(_weight); - - THTensor *_bias = NULL; - if(bias) { - _bias = THTensor_(newTranspose)(bias, 0, 1); - bias = THTensor_(newContiguous)(_bias); - } - - // resize weight - long s1 = weight->size[0]; - long s2 = weight->size[1]; - long s3 = weight->size[2] * weight->size[3]; - weight = THTensor_(newWithStorage3d)(weight->storage, weight->storageOffset, - s1, -1, s2, -1, s3, -1); - - input = THTensor_(newContiguous)(input); - - int ndim = input->nDimension; - - int batch = 1; - if (ndim == 3) { - // Force batch - batch = 0; - THTensor_(resize4d)(input, 1, input->size[0], input->size[1], input->size[2]); - } - - long inputHeight = input->size[3]; - long inputWidth = input->size[2]; - long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; - long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; - - long T = input->size[0]; - long t; - - THTensor_(resize5d)(output, T, nInputPlane, nOutputPlane, outputHeight, outputWidth); - THTensor_(resize4d)(finput, T, nInputPlane, kW*kH*1, outputHeight*outputWidth); - -#pragma omp parallel for private(t) - for(t = 0; t < T; t++) - { - THTensor *input_t = THTensor_(newSelect)(input, 0, t); - THTensor *output_t = THTensor_(newSelect)(output, 0, t); - THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); - - long i; -#pragma omp parallel for private(i) - for(i = 0; i < nInputPlane; i++) - { - THTensor *weight_i = THTensor_(newSelect)(weight, 0, i); - THTensor *input_i = THTensor_(newNarrow)(input_t, 0, i, 1); - THTensor *output_i = THTensor_(newSelect)(output_t, 0, i); - THTensor *finput_i = THTensor_(newSelect)(finput_t, 0, i); - THTensor *bias_i = NULL; - if(bias) { - bias_i = THTensor_(newSelect)(bias, 0, i); - } - THNN_(SpatialDepthWiseConvolution_updateOutput_frame) - (input_i, output_i, weight_i, bias_i, finput_i, - kW, kH, dW, dH, padW, padH, - 1, inputWidth, inputHeight, - nOutputPlane, outputWidth, outputHeight); - - THTensor_(free)(input_i); - THTensor_(free)(weight_i); - THTensor_(free)(bias_i); - THTensor_(free)(output_i); - THTensor_(free)(finput_i); - } - THTensor_(free)(input_t); - THTensor_(free)(output_t); - THTensor_(free)(finput_t); - } - - THTensor_(free)(weight); - THTensor_(free)(_weight); - THTensor_(free)(bias); - THTensor_(free)(_bias); - THTensor_(resize4d)(output, T, nInputPlane * nOutputPlane, outputHeight, outputWidth); - - if (batch == 0) { - THTensor_(select)(output, NULL, 0, 0); - THTensor_(select)(input, NULL, 0, 0); - THTensor_(select)(finput, NULL, 0, 0); - } - THTensor_(free)(input); -} - -static void THNN_(SpatialDepthWiseConvolution_updateGradInput_frame)( - THTensor *gradInput, - THTensor *gradOutput, - THTensor *weight, - THTensor *fgradInput, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH) -{ - THTensor *gradOutput2d = THTensor_(newWithStorage2d) - (gradOutput->storage, gradOutput->storageOffset, - gradOutput->size[0], -1, - gradOutput->size[1]*gradOutput->size[2], -1); - THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d); - THTensor_(free)(gradOutput2d); - - THTensor_(zero)(gradInput); - - THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, - padW, padH, - gradInput->size[0], 
gradInput->size[2], gradInput->size[1], - gradOutput->size[2], gradOutput->size[1]); -} - -void THNN_(SpatialDepthWiseConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - THTensor *fgradInput, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH) -{ - long nInputPlane = weight->nDimension == 2 ? weight->size[1]/(kH*kW) : weight->size[1]; - long nOutputPlane = weight->size[0]; - if (weight->nDimension == 2) { - THTensor_(resize4d)(weight, nOutputPlane, nInputPlane, kH, kW); - } - gradOutput = THTensor_(newWithTensor)(gradOutput); - - if (input->nDimension == 3) { - if (gradOutput->nDimension == 3) { - THTensor_(resize4d)(gradOutput, nInputPlane, nOutputPlane, gradOutput->size[1], gradOutput->size[2]); - } - } - else - { - if (gradOutput->nDimension == 4) { - THTensor_(resize5d)(gradOutput, gradOutput->size[0], nInputPlane, nOutputPlane, gradOutput->size[2], gradOutput->size[3]); - } - } - - - THNN_(SpatialDepthWiseConvolution_shapeCheck) - (input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW); - - THTensor *_weight = THTensor_(newTranspose)(weight, 0, 1); - weight = THTensor_(newContiguous)(_weight); - - - // resize weight - long s1 = weight->size[0]; - long s2 = weight->size[1]; - long s3 = weight->size[2] * weight->size[3]; - weight = THTensor_(newWithStorage3d)(weight->storage, weight->storageOffset, - s1, -1, s2, -1, s3, -1); - - input = THTensor_(newContiguous)(input); - - int batch = 1; - if (input->nDimension == 3) { - // Force batch - batch = 0; - THTensor_(resize4d)(input, 1, input->size[0], input->size[1], input->size[2]); - THTensor_(resize5d)(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); - } - - long inputHeight = input->size[3]; - long inputWidth = input->size[2]; - long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; - long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; - - long T = input->size[0]; - long t; - - THTensor_(resizeAs)(gradInput, input); - THTensor_(resize4d)(fgradInput, T, nInputPlane, kW*kH*1, outputHeight*outputWidth); - - // depending on the BLAS library, fgradInput (result tensor) might - // be left uninitialized on zero alpha, which might lead to weird behavior - // hence, to be safe, zero it - THTensor_(zero)(fgradInput); - - - -#pragma omp parallel for private(t) - for(t = 0; t < T; t++) - { - THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t); - THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); - THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t); - - - long i; -#pragma omp parallel for private(i) - for(i = 0; i < nInputPlane; i++) - { - THTensor *weight_i = THTensor_(newSelect)(weight, 0, i); - THTensor *gradInput_i = THTensor_(newNarrow)(gradInput_t, 0, i, 1); - THTensor *gradOutput_i = THTensor_(newSelect)(gradOutput_t, 0, i); - THTensor *fgradInput_i = THTensor_(newSelect)(fgradInput_t, 0, i); - - THTensor_(transpose)(weight_i, weight_i, 0, 1); - - THNN_(SpatialDepthWiseConvolution_updateGradInput_frame)(gradInput_i, gradOutput_i, - weight_i, fgradInput_i, - kW, kH, dW, dH, padW, padH); - - THTensor_(free)(gradInput_i); - THTensor_(free)(weight_i); - THTensor_(free)(gradOutput_i); - THTensor_(free)(fgradInput_i); - } - - THTensor_(free)(gradInput_t); - THTensor_(free)(gradOutput_t); - THTensor_(free)(fgradInput_t); - } - - if (batch == 0) { - THTensor_(select)(gradOutput, NULL, 0, 0); - THTensor_(select)(input, NULL, 0, 0); - 
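/* [editor's note] In the TH tensor API, passing NULL as the src argument
 * makes select() operate in place (src defaults to self), so the calls
 * around this note each drop the leading batch dimension of size 1 that
 * was forced above for the non-batch (3-D input) case. Minimal sketch,
 * assuming t is 1 x A x B x C:
 *   THTensor_(select)(t, NULL, 0, 0);   // t becomes A x B x C
 */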
THTensor_(select)(gradInput, NULL, 0, 0); - THTensor_(select)(fgradInput, NULL, 0, 0); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(weight); - THTensor_(free)(_weight); -} - -static void THNN_(SpatialDepthWiseConvolution_accGradParameters_frame)( - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, - accreal scale) -{ - long i; - THTensor *gradOutput2d = THTensor_(newWithStorage2d) - (gradOutput->storage, gradOutput->storageOffset, - gradOutput->size[0], -1, - gradOutput->size[1]*gradOutput->size[2], -1); - - THTensor_(transpose)(finput, finput, 0, 1); - THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, finput); - THTensor_(transpose)(finput, finput, 0, 1); - - if (gradBias) { - for(i = 0; i < gradBias->size[0]; i++) - { - long k; - real sum = 0; - real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0]; - for(k = 0; k < gradOutput2d->size[1]; k++) - sum += data[k]; - (gradBias->storage->data + gradBias->storageOffset)[i] += scale*sum; - } - } - - THTensor_(free)(gradOutput2d); -} - -void THNN_(SpatialDepthWiseConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, - THTensor *fgradInput, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - accreal scale) -{ - long nInputPlane = gradWeight->nDimension == 2 ? gradWeight->size[1]/(kH*kW) : gradWeight->size[1]; - long nOutputPlane = gradWeight->size[0]; - if (gradWeight->nDimension == 2) { - THTensor_(resize4d)(gradWeight, nOutputPlane, nInputPlane, kH, kW); - } - - gradOutput = THTensor_(newWithTensor)(gradOutput); - if (input->nDimension == 3) { - if (gradOutput->nDimension == 3) { - THTensor_(resize4d)(gradOutput, nInputPlane, nOutputPlane, gradOutput->size[1], gradOutput->size[2]); - } - } - else - { - if (gradOutput->nDimension == 4) { - THTensor_(resize5d)(gradOutput, gradOutput->size[0], nInputPlane, nOutputPlane, gradOutput->size[2], gradOutput->size[3]); - } - } - - - THNN_(SpatialDepthWiseConvolution_shapeCheck) - (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW); - - // Transpose gradWeight & gradBias - THTensor_(transpose)(gradWeight, NULL, 0, 1); - THTensor *_gradWeight; - _gradWeight = gradWeight; - gradWeight = THTensor_(newContiguous)(gradWeight); - - THTensor *_gradBias = NULL; - if(gradBias) { - THTensor_(transpose)(gradBias, NULL, 0, 1); - _gradBias = gradBias; - gradBias = THTensor_(newContiguous)(gradBias); - } - - // resize gradWeight - long s1 = gradWeight->size[0]; - long s2 = gradWeight->size[1]; - long s3 = gradWeight->size[2] * gradWeight->size[3]; - gradWeight = THTensor_(newWithStorage3d)(gradWeight->storage, gradWeight->storageOffset, - s1, -1, s2, -1, s3, -1); - - input = THTensor_(newContiguous)(input); - - - int batch = 1; - if (input->nDimension == 3) { - // Force batch - batch = 0; - THTensor_(resize4d)(input, 1, input->size[0], input->size[1], input->size[2]); - THTensor_(resize5d)(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); - } - - long inputHeight = input->size[3]; - long inputWidth = input->size[2]; - long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; - long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; - - long T = input->size[0]; - long t; - THTensor_(resize4d)(finput, T, nInputPlane, kW*kH*1, outputHeight*outputWidth); - - for(t = 0; t < T; t++) - { - THTensor 
*gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); - THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); - long i; -#pragma omp parallel for private(i) - for(i = 0; i < nInputPlane; i++) - { - THTensor *finput_i = THTensor_(newSelect)(finput_t, 0, i); - THTensor *gradOutput_i = THTensor_(newSelect)(gradOutput_t, 0, i); - THTensor *gradWeight_i = THTensor_(newSelect)(gradWeight, 0, i); - THTensor *gradBias_i = NULL; - if(gradBias) { - gradBias_i = THTensor_(newSelect)(gradBias, 0, i); - } - THNN_(SpatialDepthWiseConvolution_accGradParameters_frame)(gradOutput_i, gradWeight_i, - gradBias_i, finput_i, scale); - - THTensor_(free)(finput_i); - THTensor_(free)(gradOutput_i); - THTensor_(free)(gradWeight_i); - THTensor_(free)(gradBias_i); - } - - THTensor_(free)(gradOutput_t); - THTensor_(free)(finput_t); - } - - // Copy back and transpose back - THTensor_(transpose)(_gradWeight, NULL, 0, 1); - THTensor_(resize4d)(_gradWeight, nInputPlane, nOutputPlane, kH, kW); - THTensor_(copy)(_gradWeight, gradWeight); - THTensor_(transpose)(_gradWeight, NULL, 0, 1); - - if(gradBias) { - THTensor_(transpose)(_gradBias, NULL, 0, 1); - THTensor_(resize2d)(_gradBias, nInputPlane, nOutputPlane); - THTensor_(copy)(_gradBias, gradBias); - THTensor_(transpose)(_gradBias, NULL, 0, 1); - } - - if (batch == 0) { - THTensor_(select)(gradOutput, NULL, 0, 0); - THTensor_(select)(input, NULL, 0, 0); - THTensor_(select)(finput, NULL, 0, 0); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(gradWeight); - THTensor_(free)(gradBias); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialDilatedConvolution.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialDilatedConvolution.c deleted file mode 100644 index 897cc0da48..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialDilatedConvolution.c +++ /dev/null @@ -1,408 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialDilatedConvolution.c" -#else - -static inline void THNN_(SpatialDilatedConvolution_shapeCheck)( - THTensor *input, THTensor *gradOutput, - THTensor *weight, THTensor *bias, - int kH, int kW, int dH, int dW, int padH, int padW, - int dilationH, int dilationW) { - - THNN_ARGCHECK(weight->nDimension == 4, 4, weight, - "4D weight tensor (nOutputPlane,nInputPlane,kH,kW) expected, " - "but got: %s"); - THArgCheck(kW > 0 && kH > 0, 9, - "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW); - THArgCheck(dW > 0 && dH > 0, 11, - "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); - THArgCheck(dilationW > 0 && dilationH > 0, 15, - "dilation should be greater than zero, but got dilationH: %d, dilationW: %d", - dilationH, dilationW); - - if (bias != NULL) { - THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]); - } - - int ndim = input->nDimension; - int dimf = 0; - int dimh = 1; - int dimw = 2; - - if (ndim == 4) { - dimf++; - dimh++; - dimw++; - } - - THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input, - "3D or 4D input tensor expected but got: %s"); - - long nInputPlane = weight->size[1]; - long inputHeight = input->size[dimh]; - long inputWidth = input->size[dimw]; - long nOutputPlane = weight->size[0]; - long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; - long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; - - if (outputWidth < 1 || outputHeight < 1) - THError("Given input size: (%ld x %ld x %ld). " - "Calculated output size: (%ld x %ld x %ld). 
Output size is too small", - nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); - - THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth); - } -} - -void THNN_(SpatialDilatedConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *columns, - THTensor *ones, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int dilationW, int dilationH) -{ - - THNN_(SpatialDilatedConvolution_shapeCheck) - (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW, - dilationH, dilationW); - - // Params: - int nInputPlane = weight->size[1]; - int nOutputPlane = weight->size[0]; - - input = THTensor_(newContiguous)(input); - weight = THTensor_(newContiguous)(weight); - bias = bias ? THTensor_(newContiguous)(bias) : bias; - int batch = 1; - if (input->nDimension == 3) { - // Force batch - batch = 0; - THTensor_(resize4d)(input, 1, input->size[0], input->size[1], input->size[2]); - } - long inputWidth = input->size[3]; - long inputHeight = input->size[2]; - long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; - long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; - - // Batch size + input planes - long batchSize = input->size[0]; - - // Resize output - THTensor_(resize4d)(output, batchSize, nOutputPlane, outputHeight, outputWidth); - THTensor_(zero)(output); - - // Resize temporary columns - THTensor_(resize2d)(columns, nInputPlane*kW*kH, outputHeight*outputWidth); - - // Define a buffer of ones, for bias accumulation - // Note: this buffer can be shared with other modules, it only ever gets increased, - // and always contains ones. - if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { - // Resize plane and fill with ones... 
- THTensor_(resize2d)(ones, outputHeight, outputWidth); - THTensor_(fill)(ones, 1); - } - - // Helpers - THTensor *input_n = THTensor_(new)(); - THTensor *output_n = THTensor_(new)(); - - // For each elt in batch, do: - for (int elt = 0; elt < batchSize; elt ++) { - // Matrix mulitply per output: - THTensor_(select)(input_n, input, 0, elt); - THTensor_(select)(output_n, output, 0, elt); - - // Do Bias first: - // M,N,K are dims of matrix A and B - long m_ = nOutputPlane; - long n_ = outputHeight * outputWidth; - long k_ = 1; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - if (bias) { - THBlas_(gemm)( - 't', 'n', - n_, m_, k_, - 1, - THTensor_(data)(ones), k_, - THTensor_(data)(bias), k_, - 0, - THTensor_(data)(output_n), n_ - ); - } else { - THTensor_(zero)(output_n); - } - - // Extract columns: - THNN_(im2col)( - THTensor_(data)(input_n), - nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, - dilationH, dilationW, - THTensor_(data)(columns) - ); - - // M,N,K are dims of matrix A and B - long m = nOutputPlane; - long n = columns->size[1]; - long k = nInputPlane*kH*kW; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 'n', 'n', - n, m, k, - 1, - THTensor_(data)(columns), n, - THTensor_(data)(weight), k, - 1, - THTensor_(data)(output_n), n - ); - } - - // Free - THTensor_(free)(input_n); - THTensor_(free)(output_n); - - // Resize output - if (batch == 0) { - THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth); - THTensor_(resize3d)(input, nInputPlane, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(weight); - if (bias) THTensor_(free)(bias); -} - -void THNN_(SpatialDilatedConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *gradColumns, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int dilationW, int dilationH) -{ - THNN_(SpatialDilatedConvolution_shapeCheck) - (input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW, - dilationH, dilationW); - - // Params - int nInputPlane = weight->size[1]; - int nOutputPlane = weight->size[0]; - - input = THTensor_(newContiguous)(input); - weight = THTensor_(newContiguous)(weight); - gradOutput = THTensor_(newContiguous)(gradOutput); - int batch = 1; - if (input->nDimension == 3) { - // Force batch - batch = 0; - THTensor_(resize4d)(input, 1, input->size[0], input->size[1], input->size[2]); - THTensor_(resize4d)(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], - gradOutput->size[2]); - } - - long inputWidth = input->size[3]; - long inputHeight = input->size[2]; - long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; - long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; - - // Batch size + input planes - long batchSize = input->size[0]; - - // Resize output - THTensor_(resize4d)(gradInput, batchSize, nInputPlane, inputHeight, inputWidth); - - // Resize temporary columns - THTensor_(resize2d)(gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth); - THTensor_(zero)(gradColumns); - - // Helpers - THTensor *gradInput_n = THTensor_(new)(); - THTensor *gradOutput_n = THTensor_(new)(); - - // For each elt in batch, do: - for (int elt = 0; elt < batchSize; elt ++) { - // Matrix mulitply per sample: - THTensor_(select)(gradInput_n, gradInput, 0, elt); - THTensor_(select)(gradOutput_n, gradOutput, 0, elt); - - // M,N,K are dims 
of matrix A and B - long m = nInputPlane*kW*kH; - long n = gradColumns->size[1]; - long k = nOutputPlane; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 'n', 't', - n, m, k, - 1, - THTensor_(data)(gradOutput_n), n, - THTensor_(data)(weight), m, - 0, - THTensor_(data)(gradColumns), n - ); - - // Unpack columns back into input: - THNN_(col2im)( - THTensor_(data)(gradColumns), - nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, - dilationH, dilationW, - THTensor_(data)(gradInput_n) - ); - } - - // Free - THTensor_(free)(gradInput_n); - THTensor_(free)(gradOutput_n); - - // Resize output - if (batch == 0) { - THTensor_(resize3d)(gradOutput, nOutputPlane, outputHeight, outputWidth); - THTensor_(resize3d)(input, nInputPlane, inputHeight, inputWidth); - THTensor_(resize3d)(gradInput, nInputPlane, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(weight); -} - - -void THNN_(SpatialDilatedConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *columns, - THTensor *ones, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int dilationW, int dilationH, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - THNN_(SpatialDilatedConvolution_shapeCheck) - (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, - dilationH, dilationW); - - // Params - int nInputPlane = gradWeight->size[1]; - int nOutputPlane = gradWeight->size[0]; - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous"); - if (gradBias) - THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous"); - int batch = 1; - if (input->nDimension == 3) { - // Force batch - batch = 0; - THTensor_(resize4d)(input, 1, input->size[0], input->size[1], input->size[2]); - THTensor_(resize4d)(gradOutput, 1, gradOutput->size[0], - gradOutput->size[1], gradOutput->size[2]); - } - - long inputWidth = input->size[3]; - long inputHeight = input->size[2]; - long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; - long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; - - // Batch size + input planes - long batchSize = input->size[0]; - - // Define a buffer of ones, for bias accumulation - if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { - // Resize plane and fill with ones... 
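/* [editor's note] Here the same ones buffer turns the bias gradient into
 * a matrix-vector product: the GEMV further below computes, for each
 * output plane p,
 *   gradBias[p] += scale * sum over all outputHeight*outputWidth
 *                  positions s of gradOutput_n[p][s]
 * since multiplying by a vector of ones just sums over the spatial
 * positions. A scalar sketch of the same reduction (hypothetical
 * plain-C form, not part of the original file):
 *   for (long p = 0; p < nOutputPlane; p++)
 *     for (long s = 0; s < outputHeight * outputWidth; s++)
 *       gradBias_data[p] += scale *
 *         gradOutput_data[p * outputHeight * outputWidth + s];
 */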
- THTensor_(resize2d)(ones, outputHeight, outputWidth); - THTensor_(fill)(ones, 1); - } - - // Resize temporary columns - THTensor_(resize2d)(columns, nInputPlane*kW*kH, outputHeight*outputWidth); - - // Helpers - THTensor *input_n = THTensor_(new)(); - THTensor *gradOutput_n = THTensor_(new)(); - - // For each elt in batch, do: - for (int elt = 0; elt < batchSize; elt ++) { - // Matrix mulitply per output: - THTensor_(select)(input_n, input, 0, elt); - THTensor_(select)(gradOutput_n, gradOutput, 0, elt); - - // Extract columns: - THNN_(im2col)( - THTensor_(data)(input_n), - nInputPlane, inputHeight, inputWidth, kH, kW, padH, padW, dH, dW, - dilationH, dilationW, - THTensor_(data)(columns) - ); - - // M,N,K are dims of matrix A and B - long m = nOutputPlane; - long n = nInputPlane*kW*kH; - long k = columns->size[1]; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 't', 'n', - n, m, k, - scale, - THTensor_(data)(columns), k, - THTensor_(data)(gradOutput_n), k, - 1, - THTensor_(data)(gradWeight), n - ); - - // Do Bias: - // M,N,K are dims of matrix A and B - long m_ = nOutputPlane; - long k_ = outputHeight * outputWidth; - - // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) - if (gradBias) { - THBlas_(gemv)( - 't', - k_, m_, - scale, - THTensor_(data)(gradOutput_n), k_, - THTensor_(data)(ones), 1, - 1, - THTensor_(data)(gradBias), 1 - ); - } - } - - // Free - THTensor_(free)(input_n); - THTensor_(free)(gradOutput_n); - - // Resize - if (batch == 0) { - THTensor_(resize3d)(gradOutput, nOutputPlane, outputHeight, outputWidth); - THTensor_(resize3d)(input, nInputPlane, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialDilatedMaxPooling.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialDilatedMaxPooling.c deleted file mode 100644 index 8f4ad13c35..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialDilatedMaxPooling.c +++ /dev/null @@ -1,401 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialDilatedMaxPooling.c" -#else - -static inline void THNN_(SpatialDilatedMaxPooling_shapeCheck)( - THTensor *input, THTensor *gradOutput, THIndexTensor *indices, - int kH, int kW, int dH, int dW, int padH, int padW, - int dilationH, int dilationW, bool ceil_mode) { - - THArgCheck(kW > 0 && kH > 0, 5, - "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW); - THArgCheck(dW > 0 && dH > 0, 8, - "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); - THArgCheck(dilationH > 0 && dilationW > 0, 12, - "dilation should be greater than zero, but got dilationH: %d dilationW: %d", - dilationH, dilationW); - - int ndim = input->nDimension; - int dimf = 0; - int dimh = 1; - int dimw = 2; - - if (ndim == 4) { - dimf++; - dimh++; - dimw++; - } - - THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input, - "3D or 4D input tensor expected but got: %s"); - - THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, - "pad should be smaller than half of kernel size, but got " - "padW = %d, padH = %d, kW = %d, kH = %d", - padW, padH, kW, kH); - - long nInputPlane = input->size[dimh-1]; - long inputHeight = input->size[dimh]; - long inputWidth = input->size[dimw]; - long outputHeight, outputWidth; - long nOutputPlane = nInputPlane; - - if (ceil_mode) - { - outputHeight = (long)(ceil((float)(inputHeight - (dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; - outputWidth = 
(long)(ceil((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; - } - else - { - outputHeight = (long)(floor((float)(inputHeight - (dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; - outputWidth = (long)(floor((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; - } - - if (padW || padH) - { - // ensure that the last pooling starts inside the image - // needed to avoid problems in ceil mode - if ((outputHeight - 1)*dH >= inputHeight + padH) - --outputHeight; - if ((outputWidth - 1)*dW >= inputWidth + padW) - --outputWidth; - } - - if (outputWidth < 1 || outputHeight < 1) - THError("Given input size: (%dx%dx%d). " - "Calculated output size: (%dx%dx%d). Output size is too small", - nInputPlane,inputHeight,inputWidth,nInputPlane,outputHeight,outputWidth); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth); - } - if (indices != NULL) { - THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimf, nOutputPlane); - THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimh, outputHeight); - THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimw, outputWidth); - } -} - -static void THNN_(SpatialDilatedMaxPooling_updateOutput_frame)( - real *input_p, - real *output_p, - THIndex_t *ind_p, - long nslices, - long iwidth, - long iheight, - long owidth, - long oheight, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - int dilationW, - int dilationH - ) -{ - long k; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - /* loop over output */ - long i, j; - real *ip = input_p + k*iwidth*iheight; - for(i = 0; i < oheight; i++) - { - for(j = 0; j < owidth; j++) - { - long hstart = i * dH - padH; - long wstart = j * dW - padW; - long hend = fminf(hstart + (kH - 1) * dilationH + 1, iheight); - long wend = fminf(wstart + (kW - 1) * dilationW + 1, iwidth); - while(hstart < 0) - hstart += dilationH; - while(wstart < 0) - wstart += dilationW; - - /* local pointers */ - real *op = output_p + k*owidth*oheight + i*owidth + j; - THIndex_t *indp = ind_p + k*owidth*oheight + i*owidth + j; - - /* compute local max: */ - long maxindex = -1; - real maxval = -THInf; - long tcntr = 0; - long x,y; - for(y = hstart; y < hend; y += dilationH) - { - for(x = wstart; x < wend; x += dilationW) - { - tcntr = y*iwidth + x; - real val = *(ip + tcntr); - if (val > maxval) - { - maxval = val; - maxindex = tcntr; - } - } - } - - /* set output to local max */ - *op = maxval; - - /* store location of max */ - *indp = maxindex + TH_INDEX_BASE; - } - } - } -} - -void THNN_(SpatialDilatedMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - int dilationW, - int dilationH, - bool ceil_mode) -{ - - int dimw = 2; - int dimh = 1; - long nbatch = 1; - long nInputPlane; - long inputHeight; - long inputWidth; - long outputHeight; - long outputWidth; - real *input_data; - real *output_data; - THIndex_t *indices_data; - - THNN_(SpatialDilatedMaxPooling_shapeCheck) - (input, NULL, NULL, kH, kW, dH, dW, - padH, padW, dilationH, dilationW, ceil_mode); - - if (input->nDimension == 4) - { - nbatch = input->size[0]; - dimw++; - dimh++; - } - - /* sizes */ - nInputPlane = input->size[dimh-1]; - inputHeight = input->size[dimh]; - inputWidth = input->size[dimw]; - if (ceil_mode) - { - outputHeight = (long)(ceil((float)(inputHeight - 
(dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; - outputWidth = (long)(ceil((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; - } - else - { - outputHeight = (long)(floor((float)(inputHeight - (dilationH * (kH - 1) + 1) + 2*padH) / dH)) + 1; - outputWidth = (long)(floor((float)(inputWidth - (dilationW * (kW - 1) + 1) + 2*padW) / dW)) + 1; - } - - if (padW || padH) - { - // ensure that the last pooling starts inside the image - // needed to avoid problems in ceil mode - if ((outputHeight - 1)*dH >= inputHeight + padH) - --outputHeight; - if ((outputWidth - 1)*dW >= inputWidth + padW) - --outputWidth; - } - - /* get contiguous input */ - input = THTensor_(newContiguous)(input); - - /* resize output */ - if (input->nDimension == 3) - { - THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth); - /* indices will contain the locations for each output point */ - THIndexTensor_(resize3d)(indices, nInputPlane, outputHeight, outputWidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - - THNN_(SpatialDilatedMaxPooling_updateOutput_frame) - (input_data, output_data, - indices_data, - nInputPlane, - inputWidth, inputHeight, - outputWidth, outputHeight, - kW, kH, dW, dH, - padW, padH, - dilationW, dilationH - ); - } - else - { - long p; - - THTensor_(resize4d)(output, nbatch, nInputPlane, outputHeight, outputWidth); - /* indices will contain the locations for each output point */ - THIndexTensor_(resize4d)(indices, nbatch, nInputPlane, outputHeight, outputWidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) - { - THNN_(SpatialDilatedMaxPooling_updateOutput_frame) - (input_data+p*nInputPlane*inputWidth*inputHeight, - output_data+p*nInputPlane*outputWidth*outputHeight, - indices_data+p*nInputPlane*outputWidth*outputHeight, - nInputPlane, - inputWidth, inputHeight, - outputWidth, outputHeight, - kW, kH, dW, dH, - padW, padH, - dilationW, dilationH - ); - } - } - - /* cleanup */ - THTensor_(free)(input); -} - -static void THNN_(SpatialDilatedMaxPooling_updateGradInput_frame)( - real *gradInput_p, - real *gradOutput_p, - THIndex_t *ind_p, - long nInputPlane, - long inputWidth, - long inputHeight, - long outputWidth, - long outputHeight, - int dW, - int dH) -{ - long k; -#pragma omp parallel for private(k) - for (k = 0; k < nInputPlane; k++) - { - real *gradInput_p_k = gradInput_p + k*inputWidth*inputHeight; - real *gradOutput_p_k = gradOutput_p + k*outputWidth*outputHeight; - THIndex_t *ind_p_k = ind_p + k*outputWidth*outputHeight; - - /* calculate max points */ - long i, j; - for(i = 0; i < outputHeight; i++) - { - for(j = 0; j < outputWidth; j++) - { - /* retrieve position of max */ - long maxp = ind_p_k[i*outputWidth + j] - TH_INDEX_BASE; - if (maxp != -1) { - /* update gradient */ - gradInput_p_k[maxp] += gradOutput_p_k[i*outputWidth + j]; - } - } - } - } -} - -void THNN_(SpatialDilatedMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - int dilationW, - int dilationH, - bool ceil_mode) -{ - int dimw = 2; - int dimh = 1; - long nbatch = 1; - int nInputPlane; - int inputHeight; - int inputWidth; - int outputHeight; - int outputWidth; - real *gradInput_data; - real *gradOutput_data; - 
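/* [editor's note] Max-pooling backward is a scatter-add driven by the
 * indices tensor: the forward pass stored, for every output cell, the
 * flat index of the winning input element (shifted by TH_INDEX_BASE).
 * Below, gradInput is zeroed and the frame routine above is invoked to
 * add each incoming gradient at its stored location:
 *   gradInput_p_k[maxp] += gradOutput_p_k[i*outputWidth + j];
 * The += matters because pooling windows can overlap (stride smaller
 * than the dilated kernel extent), so several output cells may route
 * gradient to the same input element. */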
THIndex_t *indices_data; - - THNN_(SpatialDilatedMaxPooling_shapeCheck) - (input, gradOutput, indices, kH, kW, dH, dW, - padH, padW, dilationH, dilationW, ceil_mode); - - /* get contiguous gradOutput */ - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - if (input->nDimension == 4) { - nbatch = input->size[0]; - dimw++; - dimh++; - } - - /* sizes */ - nInputPlane = input->size[dimh-1]; - inputHeight = input->size[dimh]; - inputWidth = input->size[dimw]; - outputHeight = gradOutput->size[dimh]; - outputWidth = gradOutput->size[dimw]; - - /* get raw pointers */ - gradInput_data = THTensor_(data)(gradInput); - gradOutput_data = THTensor_(data)(gradOutput); - indices_data = THIndexTensor_(data)(indices); - - /* backprop */ - if (input->nDimension == 3) - { - THNN_(SpatialDilatedMaxPooling_updateGradInput_frame) - (gradInput_data, gradOutput_data, - indices_data, - nInputPlane, - inputWidth, inputHeight, - outputWidth, outputHeight, - dW, dH); - } - else - { - long p; -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) - { - THNN_(SpatialDilatedMaxPooling_updateGradInput_frame) - (gradInput_data+p*nInputPlane*inputWidth*inputHeight, - gradOutput_data+p*nInputPlane*outputWidth*outputHeight, - indices_data+p*nInputPlane*outputWidth*outputHeight, - nInputPlane, - inputWidth, inputHeight, - outputWidth, outputHeight, - dW, dH); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialFractionalMaxPooling.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialFractionalMaxPooling.c deleted file mode 100644 index a98954cc6c..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialFractionalMaxPooling.c +++ /dev/null @@ -1,253 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialFractionalMaxPooling.c" -#else - -static long* THNN_(SpatialFractionalMaxPooling_generateIntervals)( - real sample, - long inputSize, - long outputSize, - int poolSize) { - real alpha = (real) (inputSize - poolSize) / (real) (outputSize - 1); - long* sequence = (long*) THAlloc(sizeof(long) * outputSize); - - long i; - for (i = 0; i < outputSize - 1; ++i) { - sequence[i] = - (long) ((i + sample) * alpha) - (long) (sample * alpha); - } - sequence[outputSize - 1] = inputSize - poolSize; - - return sequence; -} - -static void THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( - real* input, - real* output, - THIndex_t* indices, - real* randomSamples, - long numPlanes, - long inputW, long inputH, - long outputW, long outputH, - int poolSizeW, int poolSizeH) { - long plane; -#pragma omp parallel for private(plane) - for (plane = 0; plane < numPlanes; ++plane) { - /* each plane contains 2 random samples, one for W and one for H */ - real* randomSamplesForPlane = randomSamples + plane * 2; - - /* Generate interval sequence */ - long* sequenceW = - THNN_(SpatialFractionalMaxPooling_generateIntervals)( - randomSamplesForPlane[0], inputW, outputW, poolSizeW); - long* sequenceH = - THNN_(SpatialFractionalMaxPooling_generateIntervals)( - randomSamplesForPlane[1], inputH, outputH, poolSizeH); - - /* loop over output */ - long h, w; - - real* inputForPlane = input + plane * inputW * inputH; - real* outputForPlane = output + plane * outputW * outputH; - THIndex_t* indicesForPlane = indices + plane * outputW * outputH; - - for (h = 0; h < outputH; ++h) { - long inputHStart = sequenceH[h]; - - for (w = 0; w < outputW; ++w) { - long inputWStart 
= sequenceW[w]; - - real maxVal = -THInf; - long maxIndex = -1; - - long h2, w2; - for (h2 = inputHStart; h2 < inputHStart + poolSizeH; ++h2) { - for (w2 = inputWStart; w2 < inputWStart + poolSizeW; ++w2) { - THAssert(h2 >= 0 && h2 < inputH); - THAssert(w2 >= 0 && w2 < inputW); - - long planeIndex = h2 * inputW + w2; - real val = inputForPlane[planeIndex]; - if (val > maxVal) { - maxVal = val; - maxIndex = planeIndex; - } - } - } - - THAssert(maxVal != -THInf); - THAssert(maxIndex != -1); - - outputForPlane[h * outputW + w] = maxVal; - /* +1 to lua index */ - indicesForPlane[h * outputW + w] = maxIndex + TH_INDEX_BASE; - } - } - - THFree(sequenceW); - THFree(sequenceH); - } -} - -void THNN_(SpatialFractionalMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int outputW, int outputH, - int poolSizeW, int poolSizeH, - THIndexTensor *indices, - THTensor *randomSamples) { - - long numBatch = 1; - int planeDim = 0; - int heightDim = 1; - int widthDim = 2; - - long numInputDims = THTensor_(nDimension)(input); - THNN_ARGCHECK(numInputDims == 3 || numInputDims == 4, 2, input, - "3D or 4D (batch mode) tensor expected for input, but got: %s"); - - if (numInputDims == 4) { - numBatch = THTensor_(size)(input, 0); - planeDim++; - heightDim++; - widthDim++; - } - - /* sizes */ - long numPlanes = THTensor_(size)(input, planeDim); - long inputH = THTensor_(size)(input, heightDim); - long inputW = THTensor_(size)(input, widthDim); - - THArgCheck(outputH + poolSizeH - 1 < inputH, 7, - "poolSizeH (%d) too large relative to input height (%d)", - poolSizeH, inputH); - THArgCheck(outputW + poolSizeW - 1 < inputW, 6, - "poolSizeW (%d) too large relative to input width (%d)", - poolSizeW, inputW); - - /* get contiguous input */ - input = THTensor_(newContiguous)(input); - - if (numInputDims == 3) { - /* resize output */ - THTensor_(resize3d)(output, numPlanes, outputH, outputW); - /* indices will contain the locations for each output point */ - THIndexTensor_(resize3d)(indices, numPlanes, outputH, outputW); - - THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( - THTensor_(data)(input), - THTensor_(data)(output), - THIndexTensor_(data)(indices), - THTensor_(data)(randomSamples), - numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH); - } else { - THTensor_(resize4d)(output, numBatch, numPlanes, outputH, outputW); - /* indices will contain the locations for each output point */ - THIndexTensor_(resize4d)(indices, numBatch, numPlanes, outputH, outputW); - - long batch; -#pragma omp parallel for private(batch) - for (batch = 0; batch < numBatch; ++batch) { - THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( - THTensor_(data)(input) + batch * numPlanes * inputH * inputW, - THTensor_(data)(output) + batch * numPlanes * outputH * outputW, - THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW, - THTensor_(data)(randomSamples) + batch * numPlanes * 2, - numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH); - } - } - - /* cleanup */ - THTensor_(free)(input); -} - -static void THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)( - real* gradInput, - real* gradOutput, - THIndex_t* indices, - long numPlanes, - long inputW, long inputH, - long outputW, long outputH) { - long plane; -#pragma omp parallel for private(plane) - for (plane = 0; plane < numPlanes; plane++) { - real* gradInputForPlane = gradInput + plane * inputW * inputH; - real* gradOutputForPlane = gradOutput + plane * outputW * outputH; - THIndex_t* indicesForPlane = 
indices + plane * outputW * outputH; - - long h, w; - for (h = 0; h < outputH; ++h) { - for (w = 0; w < outputW; ++w) { - long outputIndex = h * outputW + w; - long index = indicesForPlane[outputIndex] - TH_INDEX_BASE; - THAssert(index >= 0 && index < inputW * inputH); - - gradInputForPlane[index] += gradOutputForPlane[outputIndex]; - } - } - } -} - -void THNN_(SpatialFractionalMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int outputW, int outputH, - int poolSizeW, int poolSizeH, - THIndexTensor *indices) { - - long numBatch = 1; - int planeDim = 0; - int heightDim = 1; - int widthDim = 2; - - long numInputDims = THTensor_(nDimension)(input); - if (numInputDims == 4) { - numBatch = THTensor_(size)(input, 0); - planeDim = 1; - heightDim++; - widthDim++; - } - - /* sizes */ - long numPlanes = THTensor_(size)(input, planeDim); - long inputH = THTensor_(size)(input, heightDim); - long inputW = THTensor_(size)(input, widthDim); - - THArgCheck(outputW == THTensor_(size)(gradOutput, widthDim), 3, - "gradOutput width unexpected"); - THArgCheck(outputH == THTensor_(size)(gradOutput, heightDim), 3, - "gradOutput height unexpected"); - - /* get contiguous gradOutput */ - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - /* backprop */ - if (numInputDims == 3) { - THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)( - THTensor_(data)(gradInput), - THTensor_(data)(gradOutput), - THIndexTensor_(data)(indices), - numPlanes, inputW, inputH, outputW, outputH); - } else { - long batch; -#pragma omp parallel for private(batch) - for (batch = 0; batch < numBatch; ++batch) { - THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)( - THTensor_(data)(gradInput) + batch * numPlanes * inputH * inputW, - THTensor_(data)(gradOutput) + batch * numPlanes * outputH * outputW, - THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW, - numPlanes, inputW, inputH, outputW, outputH); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialFullConvolution.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialFullConvolution.c deleted file mode 100644 index 2edc53b5ab..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialFullConvolution.c +++ /dev/null @@ -1,462 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialFullConvolution.c" -#else - -static void THNN_(im2col)(const real* data_im, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - real* data_col) { - const int height_col = (height + 2 * pad_h - - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; - const int width_col = (width + 2 * pad_w - - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; - const int channels_col = channels * kernel_h * kernel_w; - for (int c_col = 0; c_col < channels_col; ++c_col) { - int w_offset = c_col % kernel_w; - int h_offset = (c_col / kernel_w) % kernel_h; - int c_im = c_col / kernel_h / kernel_w; - for (int h_col = 0; h_col < height_col; ++h_col) { - for (int w_col = 0; w_col < width_col; ++w_col) { - int h_im = h_col * stride_h - pad_h + h_offset * dilation_h; - int w_im = w_col * stride_w - pad_w + w_offset * dilation_w; - data_col[(c_col * height_col + h_col) * width_col + w_col] = - 
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? - data_im[(c_im * height + h_im) * width + w_im] : 0; - } - } - } -} - -static void THNN_(col2im)(const real* data_col, const int channels, - const int height, const int width, const int kernel_h, const int kernel_w, - const int pad_h, const int pad_w, - const int stride_h, const int stride_w, - const int dilation_h, const int dilation_w, - real* data_im) { - memset(data_im, 0, sizeof(real) * height * width * channels); - const int height_col = (height + 2 * pad_h - - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; - const int width_col = (width + 2 * pad_w - - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; - const int channels_col = channels * kernel_h * kernel_w; - for (int c_col = 0; c_col < channels_col; ++c_col) { - int w_offset = c_col % kernel_w; - int h_offset = (c_col / kernel_w) % kernel_h; - int c_im = c_col / kernel_h / kernel_w; - for (int h_col = 0; h_col < height_col; ++h_col) { - for (int w_col = 0; w_col < width_col; ++w_col) { - int h_im = h_col * stride_h - pad_h + h_offset * dilation_h; - int w_im = w_col * stride_w - pad_w + w_offset * dilation_w; - if (h_im >= 0 && h_im < height && w_im >= 0 && w_im < width) - data_im[(c_im * height + h_im) * width + w_im] += - data_col[(c_col * height_col + h_col) * width_col + w_col]; - } - } - } -} - -static inline void THNN_(SpatialFullConvolution_shapeCheck)( - THTensor *input, THTensor *gradOutput, - THTensor *weight, THTensor *bias, - int kH, int kW, int dH, int dW, int padH, int padW, int adjH, int adjW) { - - THArgCheck(kW > 0 && kH > 0, 9, - "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW); - THArgCheck(dW > 0 && dH > 0, 11, - "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); - THArgCheck(adjW < dW && adjH < dH, 15, - "output adjustment must be smaller than stride, but got adjH: %d adjW: %d dH: %d dW: %d", - adjH, adjW, dH, dW); - THNN_ARGCHECK(weight->nDimension == 2 || weight->nDimension == 4, 5, weight, - "2D or 4D weight tensor expected, but got: %s"); - - if (bias != NULL) { - THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[1]); - } - - int ndim = input->nDimension; - int dimf = 0; - int dimh = 1; - int dimw = 2; - - if (ndim == 4) { - dimf++; - dimh++; - dimw++; - } - - THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input, - "3D or 4D input tensor expected but got: %s"); - - long nInputPlane = weight->size[0]; - long inputHeight = input->size[dimh]; - long inputWidth = input->size[dimw]; - long nOutputPlane = weight->size[1]; - long outputHeight = (inputHeight - 1) * dH - 2*padH + kH + adjH; - long outputWidth = (inputWidth - 1) * dW - 2*padW + kW + adjW; - - if (outputWidth < 1 || outputHeight < 1) - THError("Given input size: (%d x %d x %d). " - "Calculated output size: (%d x %d x %d). 
Output size is too small", - nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); - - THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth); - } -} - -void THNN_(SpatialFullConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *columns, - THTensor *ones, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int adjW, int adjH) -{ - THNN_(SpatialFullConvolution_shapeCheck) - (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW, adjH, adjW); - - int nInputPlane = THTensor_(size)(weight,0); - int nOutputPlane = THTensor_(size)(weight,1); - - input = THTensor_(newContiguous)(input); - weight = THTensor_(newContiguous)(weight); - bias = bias ? THTensor_(newContiguous)(bias) : bias; - int batch = 1; - if (input->nDimension == 3) { - // Force batch - batch = 0; - THTensor_(resize4d)(input, 1, input->size[0], input->size[1], input->size[2]); - } - - long inputHeight = input->size[2]; - long inputWidth = input->size[3]; - long outputHeight = (inputHeight - 1) * dH - 2*padH + kH + adjH; - long outputWidth = (inputWidth - 1) * dW - 2*padW + kW + adjW; - - // Batch size + input planes - long batchSize = input->size[0]; - - // Resize output - THTensor_(resize4d)(output, batchSize, nOutputPlane, outputHeight, outputWidth); - - // Resize temporary columns - THTensor_(resize2d)(columns, nOutputPlane*kW*kH, inputHeight*inputWidth); - THTensor_(zero)(columns); - - // Define a buffer of ones, for bias accumulation - // Note: this buffer can be shared with other modules, it only ever gets increased, - // and always contains ones. - if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { - // Resize plane and fill with ones... 
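
The ones buffer resized below is the usual BLAS device for folding the bias add into a GEMM: adding bias[p] to every spatial position of output plane p is the rank-1 product bias * ones^T, which the 't','n' THBlas_(gemm) call later in this function computes with beta = 1. A minimal plain-C sketch of that trick, under an assumed plane-major layout and with an illustrative helper name (not code from this file):

#include <stddef.h>

/* Rank-1 bias accumulation: output(p, i) += bias[p] * ones[i], where every
 * ones[i] == 1.0. This is the loop form of the gemm-with-ones call below. */
static void add_bias_rank1(double *output, const double *bias,
                           const double *ones,
                           size_t nPlanes, size_t nSpatial)
{
  for (size_t p = 0; p < nPlanes; p++)
    for (size_t i = 0; i < nSpatial; i++)
      output[p * nSpatial + i] += bias[p] * ones[i];
}
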
- THTensor_(resize2d)(ones, outputHeight, outputWidth); - THTensor_(fill)(ones, 1); - } - - // Helpers - THTensor *input_n = THTensor_(new)(); - THTensor *output_n = THTensor_(new)(); - - int elt; - // For each elt in batch, do: - for (elt = 0; elt < batchSize; elt ++) { - // Matrix multiply per output: - THTensor_(select)(input_n, input, 0, elt); - THTensor_(select)(output_n, output, 0, elt); - - // M,N,K are dims of matrix A and B - // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) - long m = weight->size[1] * weight->size[2] * weight->size[3]; - long n = columns->size[1]; - long k = weight->size[0]; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 'n', 't', - n, m, k, - 1, - THTensor_(data)(input_n), n, - THTensor_(data)(weight), m, - 0, - THTensor_(data)(columns), n - ); - - // Unpack columns back into input: - THNN_(col2im)( - THTensor_(data)(columns), - nOutputPlane, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, - 1, 1, - THTensor_(data)(output_n) - ); - - // Do Bias after: - // M,N,K are dims of matrix A and B - // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) - long m_ = nOutputPlane; - long n_ = outputHeight * outputWidth; - long k_ = 1; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - if (bias) { - THBlas_(gemm)( - 't', 'n', - n_, m_, k_, - 1, - THTensor_(data)(ones), k_, - THTensor_(data)(bias), k_, - 1, - THTensor_(data)(output_n), n_ - ); - } - } - - // Free - THTensor_(free)(input_n); - THTensor_(free)(output_n); - - // Resize output - if (batch == 0) { - THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth); - THTensor_(resize3d)(input, nInputPlane, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(weight); - if (bias) THTensor_(free)(bias); -} - -void THNN_(SpatialFullConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *gradColumns, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int adjW, int adjH) -{ - THNN_(SpatialFullConvolution_shapeCheck) - (input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW, adjH, adjW); - - int nInputPlane = THTensor_(size)(weight,0); - int nOutputPlane = THTensor_(size)(weight,1); - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - weight = THTensor_(newContiguous)(weight); - int batch = 1; - if (input->nDimension == 3) { - // Force batch - batch = 0; - THTensor_(resize4d)(input, 1, input->size[0], input->size[1], input->size[2]); - THTensor_(resize4d)(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); - } - - long inputWidth = input->size[3]; - long inputHeight = input->size[2]; - long outputWidth = (inputWidth - 1) * dW - 2*padW + kW + adjW; - long outputHeight = (inputHeight - 1) * dH - 2*padH + kH + adjH; - - // Batch size + input planes - long batchSize = input->size[0]; - - // Resize output - THTensor_(resize4d)(gradInput, batchSize, nInputPlane, inputHeight, inputWidth); - THTensor_(zero)(gradInput); - - // Resize temporary columns - THTensor_(resize2d)(gradColumns, nOutputPlane*kW*kH, inputHeight*inputWidth); - - // Helpers - THTensor *gradInput_n = THTensor_(new)(); - THTensor *gradOutput_n = THTensor_(new)(); - - int elt; - // For each elt in batch, do: - for (elt = 0; elt < batchSize; elt ++) { - // Matrix multiply per sample: - THTensor_(select)(gradInput_n, 
gradInput, 0, elt); - THTensor_(select)(gradOutput_n, gradOutput, 0, elt); - - // Extract columns: - THNN_(im2col)( - THTensor_(data)(gradOutput_n), - nOutputPlane, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, - 1, 1, - THTensor_(data)(gradColumns) - ); - - - // M,N,K are dims of matrix A and B - // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) - long m = weight->size[0]; - long n = gradColumns->size[1]; - long k = weight->size[1] * weight->size[2] * weight->size[3]; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 'n', 'n', - n, m, k, - 1, - THTensor_(data)(gradColumns), n, - THTensor_(data)(weight), k, - 0, - THTensor_(data)(gradInput_n), n - ); - } - - - // Free - THTensor_(free)(gradInput_n); - THTensor_(free)(gradOutput_n); - - // Resize output - if (batch == 0) { - THTensor_(resize3d)(gradOutput, nOutputPlane, outputHeight, outputWidth); - THTensor_(resize3d)(input, nInputPlane, inputHeight, inputWidth); - THTensor_(resize3d)(gradInput, nInputPlane, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(weight); -} - - -void THNN_(SpatialFullConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *columns, - THTensor *ones, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int adjW, int adjH, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - THNN_(SpatialFullConvolution_shapeCheck) - (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, adjH, adjW); - - int nInputPlane = THTensor_(size)(gradWeight,0); - int nOutputPlane = THTensor_(size)(gradWeight,1); - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous"); - if (gradBias) - THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous"); - int batch = 1; - if (input->nDimension == 3) { - // Force batch - batch = 0; - THTensor_(resize4d)(input, 1, input->size[0], input->size[1], input->size[2]); - THTensor_(resize4d)(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]); - } - - long inputWidth = input->size[3]; - long inputHeight = input->size[2]; - long outputWidth = (inputWidth - 1) * dW - 2*padW + kW + adjW; - long outputHeight = (inputHeight - 1) * dH - 2*padH + kH + adjH; - - // Batch size + input planes - long batchSize = input->size[0]; - - // Define a buffer of ones, for bias accumulation - if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) { - // Resize plane and fill with ones... 
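
In the backward pass the same ones vector turns the bias gradient into a matrix-vector product: gradBias[p] += scale * sum over i of gradOutput(p, i), which the THBlas_(gemv) call below expresses as gradOutput^T times ones. A loop-level sketch of the identical reduction, again with an illustrative helper name and an assumed plane-major layout:

#include <stddef.h>

/* Bias-gradient reduction: each spatial location of plane p contributes
 * gradOutput(p, i) * 1.0 to gradBias[p], scaled by the caller's factor. */
static void acc_grad_bias(double *gradBias, const double *gradOutput,
                          double scale, size_t nPlanes, size_t nSpatial)
{
  for (size_t p = 0; p < nPlanes; p++) {
    double sum = 0.0;
    for (size_t i = 0; i < nSpatial; i++)
      sum += gradOutput[p * nSpatial + i];
    gradBias[p] += scale * sum;
  }
}
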
- THTensor_(resize2d)(ones, outputHeight, outputWidth); - THTensor_(fill)(ones, 1); - } - - // Resize temporary columns - THTensor_(resize2d)(columns, nOutputPlane*kW*kH, inputHeight*inputWidth); - - // Helpers - THTensor *input_n = THTensor_(new)(); - THTensor *gradOutput_n = THTensor_(new)(); - - int elt; - // For each elt in batch, do: - for (elt = 0; elt < batchSize; elt ++) { - // Matrix multiply per output: - THTensor_(select)(input_n, input, 0, elt); - THTensor_(select)(gradOutput_n, gradOutput, 0, elt); - - // Extract columns: - THNN_(im2col)( - THTensor_(data)(gradOutput_n), - nOutputPlane, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, - 1, 1, - THTensor_(data)(columns) - ); - - // M,N,K are dims of matrix A and B - // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) - long n = columns->size[0]; // nOutputPlane * kh * kw - long m = input_n->size[0]; // nInputPlane - long k = columns->size[1]; // inputHeight * inputWidth - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 't', 'n', - n, m, k, - scale, - THTensor_(data)(columns), k, - THTensor_(data)(input_n), k, - 1, - THTensor_(data)(gradWeight), n - ); - - - // Do Bias: - // M,N,K are dims of matrix A and B - // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) - long m_ = nOutputPlane; - long k_ = outputHeight * outputWidth; - - // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) - if (gradBias) { - THBlas_(gemv)( - 't', - k_, m_, - scale, - THTensor_(data)(gradOutput_n), k_, - THTensor_(data)(ones), 1, - 1, - THTensor_(data)(gradBias), 1 - ); - } - } - - // Free - THTensor_(free)(input_n); - THTensor_(free)(gradOutput_n); - - // Resize - if (batch == 0) { - THTensor_(resize3d)(gradOutput, nOutputPlane, outputHeight, outputWidth); - THTensor_(resize3d)(input, nInputPlane, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialFullConvolutionMap.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialFullConvolutionMap.c deleted file mode 100644 index 6952fbe259..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialFullConvolutionMap.c +++ /dev/null @@ -1,222 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialFullConvolutionMap.c" -#else - -void THNN_(SpatialFullConvolutionMap_updateOutput)( - THNNState *state, THTensor *input, THTensor *output_, THTensor *weight, THTensor *bias, - THTensor *connTable, int nInputPlane, int nOutputPlane, - int dW, int dH) -{ - THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous"); - THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous"); - THArgCheck( - weight != NULL && weight->nDimension == 3 - && connTable != NULL && connTable->size[0] == weight->size[0], 4, - "3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE - ); - - const int kH = (int)weight->size[1]; - const int kW = (int)weight->size[2]; - - THArgCheck(input != NULL && input->nDimension == 3, 2, "3D tensor expected"); - THArgCheck(input->size[0] >= nInputPlane, 2, "invalid number of input planes"); - - THTensor_(resize3d)( - output_, nOutputPlane, - (input->size[1] - 1) * dH + kH, - (input->size[2] - 1) * dW + kW - ); - - /* contiguous */ - input = THTensor_(newContiguous)(input); - THTensor* output = THTensor_(newContiguous)(output_); - - /* get raw pointers */ - real *input_data = THTensor_(data)(input); - real 
*output_data = THTensor_(data)(output); - real *weight_data = THTensor_(data)(weight); - real *bias_data = THTensor_(data)(bias); - real *connTable_data = THTensor_(data)(connTable); - - /* and dims */ - const long input_h = input->size[1]; - const long input_w = input->size[2]; - const long output_h = output->size[1]; - const long output_w = output->size[2]; - const long weight_h = weight->size[1]; - const long weight_w = weight->size[2]; - - long p; -#pragma omp parallel for private(p) - for (p = 0; p < nOutputPlane; p++) - { - /* add bias */ - real *ptr_output = output_data + p*output_w*output_h; - long j; - int nweight; - long k; - - for (j = 0; j < output_h*output_w; j++) - ptr_output[j] = bias_data[p]; - - /* convolve all maps */ - nweight = connTable->size[0]; - for (k = 0; k < nweight; k++) - { - /* get offsets for input/output */ - int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE; - int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE; - - if (o == p) - { - THTensor_(fullConv2Dptr)( - output_data + o*output_w*output_h, - 1.0, - input_data + i*input_w*input_h, input_h, input_w, - weight_data + k*weight_w*weight_h, weight_h, weight_w, - dH, dW - ); - } - } - } - - /* clean up */ - THTensor_(free)(input); - THTensor_(freeCopyTo)(output, output_); -} - -void THNN_(SpatialFullConvolutionMap_updateGradInput)( - THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput_, THTensor *weight, THTensor *bias, - THTensor *connTable, int nInputPlane, int nOutputPlane, - int dW, int dH) -{ - THArgCheck( - weight != NULL && weight->nDimension == 3 - && connTable != NULL && connTable->size[0] == weight->size[0], 5, - "3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE - ); - - /* contiguous */ - THTensor* gradInput = THTensor_(newContiguous)(gradInput_); - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* Resize/Zero */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - /* get raw pointers */ - real *gradInput_data = THTensor_(data)(gradInput); - real *gradOutput_data = THTensor_(data)(gradOutput); - real *weight_data = THTensor_(data)(weight); - real *connTable_data = THTensor_(data)(connTable); - - /* and dims */ - const long input_h = input->size[1]; - const long input_w = input->size[2]; - const long output_h = gradOutput->size[1]; - const long output_w = gradOutput->size[2]; - const long kH = weight->size[1]; - const long kW = weight->size[2]; - - long p; -#pragma omp parallel for private(p) - for (p = 0; p < nInputPlane; p++) - { - long k; - /* backward all */ - int nkernel = connTable->size[0]; - for (k = 0; k < nkernel; k++) - { - int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE; - int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE; - if (i == p) - { - /* gradient to input */ - THTensor_(validXCorr2Dptr)( - gradInput_data + i*input_w*input_h, - 1.0, - gradOutput_data + o*output_w*output_h, output_h, output_w, - weight_data + k*kW*kH, kH, kW, - dH, dW - ); - } - } - } - - /* clean up */ - THTensor_(freeCopyTo)(gradInput, gradInput_); - THTensor_(free)(gradOutput); -} - -void THNN_(SpatialFullConvolutionMap_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *connTable, - int nInputPlane, - int nOutputPlane, - int dW, int dH, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - THArgCheck( - gradWeight != NULL && gradWeight->nDimension == 3 - && connTable != NULL && connTable->size[0] == gradWeight->size[0], 5, 
- "3D gradWeight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE - ); - - /* contiguous */ - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* get raw pointers */ - real *input_data = THTensor_(data)(input); - real *gradOutput_data = THTensor_(data)(gradOutput); - real *gradWeight_data = THTensor_(data)(gradWeight); - real *gradBias_data = THTensor_(data)(gradBias); - - /* and dims */ - const long input_h = input->size[1]; - const long input_w = input->size[2]; - const long output_h = gradOutput->size[1]; - const long output_w = gradOutput->size[2]; - const long weight_h = gradWeight->size[1]; - const long weight_w = gradWeight->size[2]; - - /* gradients wrt bias */ - long k; -#pragma omp parallel for private(k) - for (k = 0; k < nOutputPlane; k++) - { - real *ptr_gradOutput = gradOutput_data + k*output_w*output_h; - long l; - for (l = 0; l < output_h*output_w; l++) - gradBias_data[k] += scale*ptr_gradOutput[l]; - } - - /* gradients wrt weight */ - int nkernel = connTable->size[0]; -#pragma omp parallel for private(k) - for (k = 0; k < nkernel; k++) - { - int o = (int)THTensor_(get2d)(connTable,k,1) - TH_INDEX_BASE; - int i = (int)THTensor_(get2d)(connTable,k,0) - TH_INDEX_BASE; - - /* gradient to kernel */ - THTensor_(validXCorr2DRevptr)( - gradWeight_data + k*weight_w*weight_h, - scale, - gradOutput_data + o*output_w*output_h, output_h, output_w, - input_data + i*input_w*input_h, input_h, input_w, - dH, dW - ); - } - - /* clean up */ - THTensor_(free)(input); - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialMaxPooling.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialMaxPooling.c deleted file mode 100644 index 88aaa40e17..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialMaxPooling.c +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialMaxPooling.c" -#else - -void THNN_(SpatialMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - bool ceil_mode) -{ - THNN_(SpatialDilatedMaxPooling_updateOutput)( - state, input, output, indices, - kW, kH, dW, dH, padW, padH, 1, 1, ceil_mode - ); -} - -void THNN_(SpatialMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - bool ceil_mode) -{ - THNN_(SpatialDilatedMaxPooling_updateGradInput)( - state, input, gradOutput, gradInput, indices, - kW, kH, dW, dH, padW, padH, 1, 1, ceil_mode - ); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialMaxUnpooling.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialMaxUnpooling.c deleted file mode 100644 index 3205386860..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialMaxUnpooling.c +++ /dev/null @@ -1,234 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialMaxUnpooling.c" -#else - -static void THNN_(SpatialMaxUnpooling_updateOutput_frame)(real *input_p, real *output_p, - THIndex_t *ind_p, - int nslices, - int iwidth, int iheight, - int owidth, int oheight) -{ - int k; - int has_error = 0; - THIndex_t error_index; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - real *output_p_k = output_p + k*owidth*oheight; - real *input_p_k = input_p + k*iwidth*iheight; - THIndex_t *ind_p_k = ind_p + 
k*iwidth*iheight; - - int i, j; - THIndex_t maxp; - for(i = 0; i < iheight; i++) - { - for(j = 0; j < iwidth; j++) - { - maxp = ind_p_k[i*iwidth + j] - TH_INDEX_BASE; /* retrieve position of max */ - if(maxp<0 || maxp>=owidth*oheight){ -#pragma omp critical - { - has_error = 1; - error_index = maxp; - } - } else { - output_p_k[maxp] = input_p_k[i*iwidth + j]; /* update output */ - } - } - } - } - if (has_error) { - THError("found an invalid max index %ld (output volumes are of size %dx%d)", - error_index, oheight, owidth); - } -} - -void THNN_(SpatialMaxUnpooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int owidth, int oheight) -{ - int dimw = 2; - int dimh = 1; - int nbatch = 1; - int nslices; - int iheight; - int iwidth; - real *input_data; - real *output_data; - THIndex_t *indices_data; - - - THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input, - "3D or 4D (batch mode) tensor expected for input, but got: %s"); - THNN_CHECK_SHAPE_INDICES(input, indices); - - if (input->nDimension == 4) - { - nbatch = input->size[0]; - dimw++; - dimh++; - } - - /* sizes */ - nslices = input->size[dimh-1]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - - /* get contiguous input and indices */ - input = THTensor_(newContiguous)(input); - indices = THIndexTensor_(newContiguous)(indices); - - /* resize output */ - if (input->nDimension == 3) - { - THTensor_(resize3d)(output, nslices, oheight, owidth); - THTensor_(zero)(output); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - - THNN_(SpatialMaxUnpooling_updateOutput_frame)(input_data, output_data, - indices_data, - nslices, - iwidth, iheight, - owidth, oheight); - } - else - { - int p; - - THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth); - THTensor_(zero)(output); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - - for (p = 0; p < nbatch; p++) - { - THNN_(SpatialMaxUnpooling_updateOutput_frame)( - input_data+p*nslices*iwidth*iheight, - output_data+p*nslices*owidth*oheight, - indices_data+p*nslices*iwidth*iheight, - nslices, - iwidth, iheight, - owidth, oheight); - } - } - - /* cleanup */ - THTensor_(free)(input); - THIndexTensor_(free)(indices); -} - -static void THNN_(SpatialMaxUnpooling_updateGradInput_frame)(real *gradInput_p, real *gradOutput_p, - THIndex_t *ind_p, - int nslices, - int iwidth, int iheight, - int owidth, int oheight) -{ - int k; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - real *gradInput_p_k = gradInput_p + k*iwidth*iheight; - real *gradOutput_p_k = gradOutput_p + k*owidth*oheight; - THIndex_t *ind_p_k = ind_p + k*iwidth*iheight; - - int i, j; - THIndex_t maxp; - for(i = 0; i < iheight; i++) - { - for(j = 0; j < iwidth; j++) - { - maxp = ind_p_k[i*iwidth + j] - TH_INDEX_BASE; /* retrieve position of max */ - if(maxp < 0 || maxp >= owidth * oheight) { - THError("invalid max index %ld, owidth= %d, oheight= %d", maxp, owidth, oheight); - } - gradInput_p_k[i*iwidth + j] = gradOutput_p_k[maxp]; /* update gradient */ - } - } - } -} - -void THNN_(SpatialMaxUnpooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int owidth, int oheight) -{ - int dimw = 2; - int dimh = 1; - int nbatch = 1; - int nslices; - int iheight; - int iwidth; - real *gradInput_data; - 
real *gradOutput_data; - THIndex_t *indices_data; - - THNN_CHECK_SHAPE_INDICES(input, indices); - - /* get contiguous gradOutput and indices */ - gradOutput = THTensor_(newContiguous)(gradOutput); - indices = THIndexTensor_(newContiguous)(indices); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - if (input->nDimension == 4) { - nbatch = input->size[0]; - dimw++; - dimh++; - } - - /* sizes */ - nslices = input->size[dimh-1]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - - if(owidth!=gradOutput->size[dimw] || oheight!=gradOutput->size[dimh]){ - THError("Inconsistent gradOutput size. oheight= %d, owidth= %d, gradOutput: %dx%d", - oheight, owidth, gradOutput->size[dimh], gradOutput->size[dimw]); - } - - /* get raw pointers */ - gradInput_data = THTensor_(data)(gradInput); - gradOutput_data = THTensor_(data)(gradOutput); - indices_data = THIndexTensor_(data)(indices); - - /* backprop */ - if (input->nDimension == 3) - { - THNN_(SpatialMaxUnpooling_updateGradInput_frame)(gradInput_data, gradOutput_data, - indices_data, - nslices, - iwidth, iheight, - owidth, oheight); - } - else - { - int p; - for (p = 0; p < nbatch; p++) - { - THNN_(SpatialMaxUnpooling_updateGradInput_frame)(gradInput_data+p*nslices*iwidth*iheight, gradOutput_data+p*nslices*owidth*oheight, - indices_data+p*nslices*iwidth*iheight, - nslices, - iwidth, iheight, - owidth, oheight); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); - THIndexTensor_(free)(indices); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialReflectionPadding.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialReflectionPadding.c deleted file mode 100644 index dcde660eac..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialReflectionPadding.c +++ /dev/null @@ -1,260 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialReflectionPadding.c" -#else - -static void THNN_(SpatialReflectionPadding_updateOutput_frame)( - real *input_p, real *output_p, - long nslices, - long iwidth, long iheight, - long owidth, long oheight, - int pad_l, int pad_r, - int pad_t, int pad_b) -{ - int iStartX = fmax(0, -pad_l); - int iStartY = fmax(0, -pad_t); - int oStartX = fmax(0, pad_l); - int oStartY = fmax(0, pad_t); - - long k, ip_x, ip_y; -#pragma omp parallel for private(k, ip_x, ip_y) - - for (k = 0; k < nslices; k++) - { - long i, j; - for (i = 0; i < oheight; i++) { - for (j = 0; j < owidth; j++) { - if (j < pad_l) { - ip_x = pad_l * 2 - j; - } else if (j >= pad_l && j < iwidth + pad_l) { - ip_x = j; - } else { - ip_x = (iwidth + pad_l - 1) * 2 - j; - } - ip_x = ip_x - oStartX + iStartX; - - if (i < pad_t) { - ip_y = pad_t * 2 - i; - } else if (i >= pad_t && i < iheight + pad_t) { - ip_y = i; - } else { - ip_y = (iheight + pad_t - 1) * 2 - i; - } - ip_y = ip_y - oStartY + iStartY; - - real *dest_p = output_p + k*owidth*oheight + i * owidth + j; - real *src_p = input_p + k*iwidth*iheight + ip_y * iwidth + ip_x; - *dest_p = *src_p; - } - } - } -} - -void THNN_(SpatialReflectionPadding_updateOutput)(THNNState *state, - THTensor *input, - THTensor *output, - int pad_l, int pad_r, - int pad_t, int pad_b) -{ - int dimw = 2; - int dimh = 1; - int dimslices = 0; - long nbatch = 1; - long nslices; - long iheight; - long iwidth; - long oheight; - long owidth; - real *input_data; - real *output_data; - - THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input, - "3D or 4D (batch mode) tensor expected for input, but got: %s"); - - if (input->nDimension == 4) 
- { - nbatch = input->size[0]; - dimw++; - dimh++; - dimslices++; - } - - /* sizes */ - nslices = input->size[dimslices]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - oheight = iheight + pad_t + pad_b; - owidth = iwidth + pad_l + pad_r; - - THArgCheck(owidth >= 1 || oheight >= 1 , 2, - "input (H: %d, W: %d)is too small." - " Calculated output H: %d W: %d", - iheight, iwidth, oheight, owidth); - - /* get contiguous input */ - input = THTensor_(newContiguous)(input); - - /* resize output */ - if (input->nDimension == 3) - { - THTensor_(resize3d)(output, nslices, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - - THNN_(SpatialReflectionPadding_updateOutput_frame)(input_data, output_data, - nslices, - iwidth, iheight, - owidth, oheight, - pad_l, pad_r, - pad_t, pad_b); - } - else - { - long p; - - THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) - { - THNN_(SpatialReflectionPadding_updateOutput_frame)( - input_data+p*nslices*iwidth*iheight, - output_data+p*nslices*owidth*oheight, - nslices, - iwidth, iheight, - owidth, oheight, - pad_l, pad_r, - pad_t, pad_b); - } - } - - /* cleanup */ - THTensor_(free)(input); -} - -static void THNN_(SpatialReflectionPadding_updateGradInput_frame)( - real *ginput_p, real *goutput_p, - long nslices, - long iwidth, long iheight, - long owidth, long oheight, - int pad_l, int pad_r, - int pad_t, int pad_b) -{ - int iStartX = fmax(0, -pad_l); - int iStartY = fmax(0, -pad_t); - int oStartX = fmax(0, pad_l); - int oStartY = fmax(0, pad_t); - - long k, ip_x, ip_y; -#pragma omp parallel for private(k, ip_x, ip_y) - - for (k = 0; k < nslices; k++) - { - long i, j; - for (i = 0; i < oheight; i++) { - for (j = 0; j < owidth; j++) { - if (j < pad_l) { - ip_x = pad_l * 2 - j; - } else if (j >= pad_l && j < iwidth + pad_l) { - ip_x = j; - } else { - ip_x = (iwidth + pad_l - 1) * 2 - j; - } - ip_x = ip_x - oStartX + iStartX; - - if (i < pad_t) { - ip_y = pad_t * 2 - i; - } else if (i >= pad_t && i < iheight + pad_t) { - ip_y = i; - } else { - ip_y = (iheight + pad_t - 1) * 2 - i; - } - ip_y = ip_y - oStartY + iStartY; - - real *src_p = goutput_p + k*owidth*oheight + i * owidth + j; - real *dest_p = ginput_p + k*iwidth*iheight + ip_y * iwidth + ip_x; - *dest_p += *src_p; - } - } - } -} - -void THNN_(SpatialReflectionPadding_updateGradInput)(THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int pad_l, int pad_r, - int pad_t, int pad_b) -{ - int dimw = 2; - int dimh = 1; - int dimslices = 0; - long nbatch = 1; - long nslices; - long iheight; - long iwidth; - long oheight; - long owidth; - - if (input->nDimension == 4) - { - nbatch = input->size[0]; - dimw++; - dimh++; - dimslices++; - } - - /* sizes */ - nslices = input->size[dimslices]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - oheight = iheight + pad_t + pad_b; - owidth = iwidth + pad_l + pad_r; - - THArgCheck(owidth == THTensor_(size)(gradOutput, dimw), 3, - "gradOutput width unexpected. Expected: %d, Got: %d", - owidth, THTensor_(size)(gradOutput, dimw)); - THArgCheck(oheight == THTensor_(size)(gradOutput, dimh), 3, - "gradOutput height unexpected. 
Expected: %d, Got: %d", - oheight, THTensor_(size)(gradOutput, dimh)); - - /* get contiguous gradOutput */ - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - /* backprop */ - if (input->nDimension == 3) { - THNN_(SpatialReflectionPadding_updateGradInput_frame)( - THTensor_(data)(gradInput), - THTensor_(data)(gradOutput), - nslices, - iwidth, iheight, - owidth, oheight, - pad_l, pad_r, - pad_t, pad_b); - } else { - long p; -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) { - THNN_(SpatialReflectionPadding_updateGradInput_frame)( - THTensor_(data)(gradInput) + p * nslices * iheight * iwidth, - THTensor_(data)(gradOutput) + p * nslices * oheight * owidth, - nslices, - iwidth, iheight, - owidth, oheight, - pad_l, pad_r, - pad_t, pad_b); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialReplicationPadding.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialReplicationPadding.c deleted file mode 100644 index 4e318aa70e..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialReplicationPadding.c +++ /dev/null @@ -1,260 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialReplicationPadding.c" -#else - -static void THNN_(SpatialReplicationPadding_updateOutput_frame)( - real *input_p, real *output_p, - long nslices, - long iwidth, long iheight, - long owidth, long oheight, - int pad_l, int pad_r, - int pad_t, int pad_b) -{ - int iStartX = fmax(0, -pad_l); - int iStartY = fmax(0, -pad_t); - int oStartX = fmax(0, pad_l); - int oStartY = fmax(0, pad_t); - - long k, ip_x, ip_y; -#pragma omp parallel for private(k, ip_x, ip_y) - for (k = 0; k < nslices; k++) - { - long i, j; - for (i = 0; i < oheight; i++) { - for (j = 0; j < owidth; j++) { - if (j < pad_l) { - ip_x = pad_l; - } else if (j >= pad_l && j < iwidth + pad_l) { - ip_x = j; - } else { - ip_x = iwidth + pad_l - 1; - } - ip_x = ip_x - oStartX + iStartX; - - if (i < pad_t) { - ip_y = pad_t; - } else if (i >= pad_t && i < iheight + pad_t) { - ip_y = i; - } else { - ip_y = iheight + pad_t - 1; - } - ip_y = ip_y - oStartY + iStartY; - - real *dest_p = output_p + k*owidth*oheight + i * owidth + j; - real *src_p = input_p + k*iwidth*iheight + ip_y * iwidth + ip_x; - *dest_p = *src_p; - } - } - } -} - -void THNN_(SpatialReplicationPadding_updateOutput)(THNNState *state, - THTensor *input, - THTensor *output, - int pad_l, int pad_r, - int pad_t, int pad_b) -{ - int dimw = 2; - int dimh = 1; - int dimslices = 0; - long nbatch = 1; - long nslices; - long iheight; - long iwidth; - long oheight; - long owidth; - real *input_data; - real *output_data; - - THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input, - "3D or 4D (batch mode) tensor expected for input, but got: %s"); - - if (input->nDimension == 4) - { - nbatch = input->size[0]; - dimw++; - dimh++; - dimslices++; - } - - /* sizes */ - nslices = input->size[dimslices]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - oheight = iheight + pad_t + pad_b; - owidth = iwidth + pad_l + pad_r; - - THArgCheck(owidth >= 1 || oheight >= 1 , 2, - "input (H: %d, W: %d)is too small." 
- " Calculated output H: %d W: %d", - iheight, iwidth, oheight, owidth); - - - /* get contiguous input */ - input = THTensor_(newContiguous)(input); - - /* resize output */ - if (input->nDimension == 3) - { - THTensor_(resize3d)(output, nslices, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - - THNN_(SpatialReplicationPadding_updateOutput_frame)(input_data, output_data, - nslices, - iwidth, iheight, - owidth, oheight, - pad_l, pad_r, - pad_t, pad_b); - } - else - { - long p; - - THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) - { - THNN_(SpatialReplicationPadding_updateOutput_frame)( - input_data+p*nslices*iwidth*iheight, - output_data+p*nslices*owidth*oheight, - nslices, - iwidth, iheight, - owidth, oheight, - pad_l, pad_r, - pad_t, pad_b); - } - } - - /* cleanup */ - THTensor_(free)(input); -} - -static void THNN_(SpatialReplicationPadding_updateGradInput_frame)( - real *ginput_p, real *goutput_p, - long nslices, - long iwidth, long iheight, - long owidth, long oheight, - int pad_l, int pad_r, - int pad_t, int pad_b) -{ - int iStartX = fmax(0, -pad_l); - int iStartY = fmax(0, -pad_t); - int oStartX = fmax(0, pad_l); - int oStartY = fmax(0, pad_t); - - long k, ip_x, ip_y; -#pragma omp parallel for private(k, ip_x, ip_y) - for (k = 0; k < nslices; k++) - { - long i, j; - for (i = 0; i < oheight; i++) { - for (j = 0; j < owidth; j++) { - if (j < pad_l) { - ip_x = pad_l; - } else if (j >= pad_l && j < iwidth + pad_l) { - ip_x = j; - } else { - ip_x = iwidth + pad_l - 1; - } - ip_x = ip_x - oStartX + iStartX; - - if (i < pad_t) { - ip_y = pad_t; - } else if (i >= pad_t && i < iheight + pad_t) { - ip_y = i; - } else { - ip_y = iheight + pad_t - 1; - } - ip_y = ip_y - oStartY + iStartY; - - real *src_p = goutput_p + k*owidth*oheight + i * owidth + j; - real *dest_p = ginput_p + k*iwidth*iheight + ip_y * iwidth + ip_x; - *dest_p += *src_p; - } - } - } -} - -void THNN_(SpatialReplicationPadding_updateGradInput)(THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int pad_l, int pad_r, - int pad_t, int pad_b) -{ - int dimw = 2; - int dimh = 1; - int dimslices = 0; - long nbatch = 1; - long nslices; - long iheight; - long iwidth; - long oheight; - long owidth; - - if (input->nDimension == 4) - { - nbatch = input->size[0]; - dimw++; - dimh++; - dimslices++; - } - - /* sizes */ - nslices = input->size[dimslices]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - oheight = iheight + pad_t + pad_b; - owidth = iwidth + pad_l + pad_r; - - THArgCheck(owidth == THTensor_(size)(gradOutput, dimw), 3, - "gradOutput width unexpected. Expected: %d, Got: %d", - owidth, THTensor_(size)(gradOutput, dimw)); - THArgCheck(oheight == THTensor_(size)(gradOutput, dimh), 3, - "gradOutput height unexpected. 
Expected: %d, Got: %d", - oheight, THTensor_(size)(gradOutput, dimh)); - - /* get contiguous gradOutput */ - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - /* backprop */ - if (input->nDimension == 3) { - THNN_(SpatialReplicationPadding_updateGradInput_frame)( - THTensor_(data)(gradInput), - THTensor_(data)(gradOutput), - nslices, - iwidth, iheight, - owidth, oheight, - pad_l, pad_r, - pad_t, pad_b); - } else { - long p; -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) { - THNN_(SpatialReplicationPadding_updateGradInput_frame)( - THTensor_(data)(gradInput) + p * nslices * iheight * iwidth, - THTensor_(data)(gradOutput) + p * nslices * oheight * owidth, - nslices, - iwidth, iheight, - owidth, oheight, - pad_l, pad_r, - pad_t, pad_b); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); -} - - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialSubSampling.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialSubSampling.c deleted file mode 100644 index 4c077bc643..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialSubSampling.c +++ /dev/null @@ -1,302 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialSubSampling.c" -#else - -static inline void THNN_(SpatialSubSampling_shapeCheck)( - THTensor *input, - THTensor *gradOutput, - THTensor *weight, - int kW, int kH) { - int ndims = input->nDimension; - THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input, - "3D or 4D input tensor expected but got: %s"); - THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous"); - - int nInputPlane = THTensor_(size)(weight, 0); - - int dimw = 2; - int dimh = 1; - - long inputWidth; - long inputHeight; - - if (input->nDimension == 4) { - dimw++; - dimh++; - } - - inputWidth = input->size[dimw]; - inputHeight = input->size[dimh]; - - THArgCheck(input->size[dimh-1] == nInputPlane, 2, "invalid number of input planes"); - THArgCheck(inputWidth >= kW && inputHeight >= kH, 2, "input image smaller than kernel size"); -} - -void THNN_(SpatialSubSampling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - int kW, int kH, - int dW, int dH) -{ - THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous"); - - real *weight_data = THTensor_(data)(weight); - real *bias_data = THTensor_(data)(bias); - real *output_data; - real *input_data; - - int dimw = 2; - int dimh = 1; - long nbatch = 1; - - long inputWidth; - long inputHeight; - long outputWidth; - long outputHeight; - - int nInputPlane = THTensor_(size)(weight,0); - - long k; - - THNN_(SpatialSubSampling_shapeCheck)(input, NULL, weight, kW, kH); - - if (input->nDimension == 4) { - nbatch = input->size[0]; - dimw++; - dimh++; - } - - inputWidth = input->size[dimw]; - inputHeight = input->size[dimh]; - outputWidth = (inputWidth - kW) / dW + 1; - outputHeight = (inputHeight - kH) / dH + 1; - - if (input->nDimension == 3) - THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth); - else - THTensor_(resize4d)(output, input->size[0], nInputPlane, outputHeight, outputWidth); - - input = THTensor_(newContiguous)(input); - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - -#pragma omp parallel for private(k) - for(k = 0; k < nInputPlane; k++) - { - long p; - for(p = 0; p < nbatch; p++) - { - long xx, yy; - /* For all output pixels... 
*/ - real *ptr_output = output_data + p*nInputPlane*outputWidth*outputHeight + k*outputWidth*outputHeight; - /* Get the good mask for (k,i) (k out, i in) */ - real the_weight = weight_data[k]; - /* Initialize to the bias */ - real z = bias_data[k]; - long i; - for(i = 0; i < outputWidth*outputHeight; i++) - ptr_output[i] = z; - - for(yy = 0; yy < outputHeight; yy++) - { - for(xx = 0; xx < outputWidth; xx++) - { - /* Compute the mean of the input image... */ - real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW; - real sum = 0; - long kx, ky; - - for(ky = 0; ky < kH; ky++) - { - for(kx = 0; kx < kW; kx++) - sum += ptr_input[kx]; - ptr_input += inputWidth; /* next input line */ - } - /* Update output */ - *ptr_output++ += the_weight*sum; - } - } - } - } - THTensor_(free)(input); -} - -void THNN_(SpatialSubSampling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - int kW, int kH, - int dW, int dH) -{ - THNN_(SpatialSubSampling_shapeCheck)(input, gradOutput, weight, kW, kH); - - int dimw = 2; - int dimh = 1; - long nbatch = 1; - - long inputWidth; - long inputHeight; - long outputWidth; - long outputHeight; - - int nInputPlane = THTensor_(size)(weight,0); - - real *weight_data; - real *gradOutput_data; - real *input_data, *gradInput_data; - - long k; - - if (input->nDimension == 4) { - nbatch = input->size[0]; - dimw++; - dimh++; - } - - inputWidth = input->size[dimw]; - inputHeight = input->size[dimh]; - outputWidth = (inputWidth - kW) / dW + 1; - outputHeight = (inputHeight - kH) / dH + 1; - - weight_data = THTensor_(data)(weight); - gradOutput = THTensor_(newContiguous)(gradOutput); - gradOutput_data = THTensor_(data)(gradOutput); - - input_data = THTensor_(data)(input); - - THTensor_(resizeAs)(gradInput, input); - gradInput_data = THTensor_(data)(gradInput); - -#pragma omp parallel for private(k) - for(k = 0; k < nInputPlane; k++) - { - long p; - for(p = 0; p < nbatch; p++) - { - real the_weight = weight_data[k]; - real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight; - long xx, yy; - - real* ptr_gi = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight; - long i; - for(i=0; i<inputWidth*inputHeight; i++) - ptr_gi[i] = 0.0; - - for(yy = 0; yy < outputHeight; yy++) - { - for(xx = 0; xx < outputWidth; xx++) - { - real *ptr_gradInput = gradInput_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW; - real z = *ptr_gradOutput++ * the_weight; - long kx, ky; - - for(ky = 0; ky < kH; ky++) - { - for(kx = 0; kx < kW; kx++) - ptr_gradInput[kx] += z; - ptr_gradInput += inputWidth; - } - } - } - } - } - - THTensor_(free)(gradOutput); -} - -void THNN_(SpatialSubSampling_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - int kW, int kH, - int dW, int dH, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - THNN_(SpatialSubSampling_shapeCheck)(input, gradOutput, gradWeight, kW, kH); - - long nbatch = 1; - int dimw = 2; - int dimh = 1; - - long inputWidth; - long inputHeight; - long outputWidth; - long outputHeight; - - int nInputPlane = THTensor_(size)(gradWeight,0); - - real *gradWeight_data; - real *gradBias_data; - real *gradOutput_data; - real *input_data; - - long k; - - if (input->nDimension == 4) { - dimw++; - dimh++; - nbatch = input->size[0]; - } - - inputWidth = input->size[dimw]; - inputHeight = input->size[dimh]; - outputWidth = (inputWidth - kW) / dW + 1; - outputHeight = (inputHeight - kH) / dH + 1; - - gradWeight_data = THTensor_(data)(gradWeight); - gradBias_data = THTensor_(data)(gradBias); - gradOutput = THTensor_(newContiguous)(gradOutput); - gradOutput_data = THTensor_(data)(gradOutput); - - input = THTensor_(newContiguous)(input); - input_data = THTensor_(data)(input); - -#pragma omp parallel for private(k) - for(k = 0; k < nInputPlane; k++) - { - long p; - for(p = 0; p < nbatch; p++) - { - real *ptr_gradOutput = gradOutput_data + p*nInputPlane*outputHeight*outputWidth + k*outputWidth*outputHeight; - real sum; - long xx, yy; - long i; - - sum = 0; - for(i = 0; i < outputWidth*outputHeight; i++) - sum += ptr_gradOutput[i]; - gradBias_data[k] += scale*sum; - - sum = 0; - for(yy = 0; yy < outputHeight; yy++) - { - for(xx = 0; xx < outputWidth; xx++) - { - real *ptr_input = input_data + p*nInputPlane*inputWidth*inputHeight + k*inputWidth*inputHeight + yy*dH*inputWidth+xx*dW; - real z = *ptr_gradOutput++; - long kx, ky; - - for(ky = 0; ky < kH; ky++) - { - 
for(kx = 0; kx < kW; kx++) - sum += z * ptr_input[kx]; - ptr_input += inputWidth; - } - } - } - gradWeight_data[k] += scale*sum; - } - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialUpSamplingBilinear.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialUpSamplingBilinear.c deleted file mode 100644 index 8bc487ead3..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialUpSamplingBilinear.c +++ /dev/null @@ -1,174 +0,0 @@ -// Adapted from interp.cpp from Caffe util by Pauline Luc -// Originally developed by George Papandreou - -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialUpSamplingBilinear.c" -#else - -static inline void THNN_(SpatialUpSamplingBilinear_shapeCheck) - (THTensor *input, THTensor *gradOutput, - int nBatch, int nChannels, - int inputHeight, int inputWidth, - int outputHeight, int outputWidth) { - THArgCheck(inputHeight > 0 && inputWidth > 0 - && outputHeight > 0 && outputWidth > 0, 2, - "input and output sizes should be greater than 0," - " but got input (H: %d, W: %d) output (H: %d, W: %d)", - inputHeight, inputWidth, outputHeight, outputWidth); - if (input != NULL) { - THNN_ARGCHECK(input->nDimension == 4, 2, input, - "4D input tensor expected but got: %s"); - } - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, 4, 0, nBatch); - THNN_CHECK_DIM_SIZE(gradOutput, 4, 1, nChannels); - THNN_CHECK_DIM_SIZE(gradOutput, 4, 2, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, 4, 3, outputWidth); - } -} - -void THNN_(SpatialUpSamplingBilinear_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int outputHeight, - int outputWidth){ - - int nbatch = THTensor_(size)(input, 0); - int channels = THTensor_(size)(input, 1); - int inputHeight = THTensor_(size)(input, 2); - int inputWidth = THTensor_(size)(input, 3); - - THNN_(SpatialUpSamplingBilinear_shapeCheck) - (input, NULL, - nbatch, channels, - inputHeight, inputWidth, - outputHeight, outputWidth); - - input = THTensor_(newContiguous)(input); - THTensor_(resize4d)(output, - THTensor_(size)(input, 0), - THTensor_(size)(input, 1), - outputHeight, outputWidth); - THTensor_(zero)(output); - real *idata = THTensor_(data)(input); - real *odata = THTensor_(data)(output); - channels = nbatch * channels; - THAssert(inputHeight > 0 && inputWidth > 0 && outputHeight > 0 && outputWidth > 0); - // special case: just copy - if (inputHeight == outputHeight && inputWidth == outputWidth) { - for (int h2 = 0; h2 < outputHeight; ++h2) { - const int h1 = h2; - for (int w2 = 0; w2 < outputWidth; ++w2) { - const int w1 = w2; - const real* pos1 = &idata[h1 * inputWidth + w1]; - real* pos2 = &odata[h2 * outputWidth + w2]; - for (int c = 0; c < channels; ++c) { - pos2[0] = pos1[0]; - pos1 += inputWidth * inputHeight; - pos2 += outputWidth * outputHeight; - } - } - } - return; - } - const float rheight =(outputHeight > 1) ? (float)(inputHeight - 1)/(outputHeight - 1) : 0.f; - const float rwidth = (outputWidth > 1) ? (float)(inputWidth - 1) / (outputWidth - 1) : 0.f; - for (int h2 = 0; h2 < outputHeight; ++h2) { - const float h1r = rheight * h2; - const int h1 = h1r; - const int h1p = (h1 < inputHeight - 1) ? 1 : 0; - const real h1lambda = h1r - h1; - const real h0lambda = (real)1. - h1lambda; - for (int w2 = 0; w2 < outputWidth; ++w2) { - const float w1r = rwidth * w2; - const int w1 = w1r; - const int w1p = (w1 < inputWidth - 1) ? 1 : 0; - const real w1lambda = w1r - w1; - const real w0lambda = (real)1. 
- w1lambda; - const real* pos1 = &idata[h1 * inputWidth + w1]; - real* pos2 = &odata[h2 * outputWidth + w2]; - for (int c = 0; c < channels; ++c) { - pos2[0] = h0lambda * (w0lambda * pos1[0]+ w1lambda * pos1[w1p]) - + h1lambda * (w0lambda * pos1[h1p * inputWidth] - + w1lambda * pos1[h1p * inputWidth + w1p]); - pos1 += inputWidth * inputHeight; - pos2 += outputWidth * outputHeight; - } - } - } - THTensor_(free)(input); -} - -void THNN_(SpatialUpSamplingBilinear_updateGradInput)( - THNNState *state, - THTensor *gradOutput, - THTensor *gradInput, - int nbatch, - int channels, - int inputHeight, - int inputWidth, - int outputHeight, - int outputWidth){ - - THNN_(SpatialUpSamplingBilinear_shapeCheck) - (NULL, gradOutput, - nbatch, channels, - inputHeight, inputWidth, - outputHeight, outputWidth); - - THTensor_(resize4d)(gradInput, nbatch, channels, inputHeight, inputWidth); - THTensor_(zero)(gradInput); - gradOutput = THTensor_(newContiguous)(gradOutput); - real *data1 = THTensor_(data)(gradInput); - real *data2 = THTensor_(data)(gradOutput); - channels = nbatch * channels; - - // special case: same-size matching grids - if (inputHeight == outputHeight && inputWidth == outputWidth) { - for (int h2 = 0; h2 < outputHeight; ++h2) { - const int h1 = h2; - for (int w2 = 0; w2 < outputWidth; ++w2) { - const int w1 = w2; - real* pos1 = &data1[h1 * inputWidth + w1]; - const real* pos2 = &data2[h2 * outputWidth + w2]; - for (int c = 0; c < channels; ++c) { - pos1[0] += pos2[0]; - pos1 += inputWidth * inputHeight; - pos2 += outputWidth * outputHeight; - } - } - } - return; - } - const float rheight =(outputHeight > 1) ? (float)(inputHeight - 1)/(outputHeight - 1) : 0.f; - const float rwidth = (outputWidth > 1) ? (float)(inputWidth - 1)/(outputWidth - 1) : 0.f; - for (int h2 = 0; h2 < outputHeight; ++h2) { - const float h1r = rheight * h2; - const int h1 = h1r; - const int h1p = (h1 < inputHeight - 1) ? 1 : 0; - const real h1lambda = h1r - h1; - const real h0lambda = (real)1. - h1lambda; - for (int w2 = 0; w2 < outputWidth; ++w2) { - const float w1r = rwidth * w2; - const int w1 = w1r; - const int w1p = (w1 < inputWidth - 1) ? 1 : 0; - const real w1lambda = w1r - w1; - const real w0lambda = (real)1. 
- w1lambda; - real* pos1 = &data1[h1 * inputWidth + w1]; - const real* pos2 = &data2[h2 * outputWidth + w2]; - for (int c = 0; c < channels; ++c) { - pos1[0] += h0lambda * w0lambda * pos2[0]; - pos1[w1p] += h0lambda * w1lambda * pos2[0]; - pos1[h1p * inputWidth] += h1lambda * w0lambda * pos2[0]; - pos1[h1p * inputWidth + w1p] += h1lambda * w1lambda * pos2[0]; - pos1 += inputWidth * inputHeight; - pos2 += outputWidth * outputHeight; - } - } - } - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/SpatialUpSamplingNearest.c b/contrib/lua-torch/nn/lib/THNN/generic/SpatialUpSamplingNearest.c deleted file mode 100644 index b4699ff3ef..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/SpatialUpSamplingNearest.c +++ /dev/null @@ -1,199 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/SpatialUpSamplingNearest.c" -#else - - -static inline void THNN_(SpatialUpSamplingNearest_shapeCheck) - (THTensor *input, THTensor *gradOutput, - int scale_factor) { - THArgCheck(input != NULL, 2, "4D input tensor expected but got NULL"); - THArgCheck(scale_factor > 1, 4, - "scale_factor must be greater than 1, but got: %d", scale_factor); - THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input, - "3D or 4D input tensor expected but got: %s"); - if (input->nDimension == 3) { - int nChannels = THTensor_(size)(input, 0); - int inputHeight = THTensor_(size)(input, 1); - int inputWidth = THTensor_(size)(input, 2); - int outputHeight = inputHeight * scale_factor; - int outputWidth = inputWidth * scale_factor; - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, 3, 0, nChannels); - THNN_CHECK_DIM_SIZE(gradOutput, 3, 1, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, 3, 2, outputWidth); - } - } else { - int nBatch = THTensor_(size)(input, 0); - int nChannels = THTensor_(size)(input, 1); - int inputHeight = THTensor_(size)(input, 2); - int inputWidth = THTensor_(size)(input, 3); - int outputHeight = inputHeight * scale_factor; - int outputWidth = inputWidth * scale_factor; - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, 4, 0, nBatch); - THNN_CHECK_DIM_SIZE(gradOutput, 4, 1, nChannels); - THNN_CHECK_DIM_SIZE(gradOutput, 4, 2, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, 4, 3, outputWidth); - } - } -} - -void THNN_(SpatialUpSamplingNearest_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int scale_factor) -{ - THNN_(SpatialUpSamplingNearest_shapeCheck)(input, NULL, scale_factor); - int inputHeight = THTensor_(size)(input, input->nDimension-2); - int inputWidth = THTensor_(size)(input, input->nDimension-1); - int outputHeight = inputHeight * scale_factor; - int outputWidth = inputWidth * scale_factor; - - if (input->nDimension == 3) { - THTensor_(resize3d)(output, - THTensor_(size)(input, 0), - outputHeight, outputWidth); - } else { - THTensor_(resize4d)(output, - THTensor_(size)(input, 0), - THTensor_(size)(input, 1), - outputHeight, outputWidth); - } - - int dW = scale_factor; - int dH = scale_factor; - int xDim = input->nDimension-2; - int yDim = input->nDimension-1; - - // dims - int idim = input->nDimension; - int osz0 = output->size[0]; - int osz1 = output->size[1]; - int osz2 = output->size[2]; - int osz3 = 1; - if (idim > 3) { - osz3 = output->size[3]; - } - - // get strides - long *is = input->stride; - long *os = output->stride; - - // get raw pointers - real *pin = THTensor_(data)(input); - real *pout = THTensor_(data)(output); - - // perform the upsampling - int i0, i1, i2, i3, 
isrc, idst; - int iout[4]; // Output indices - int iin[4]; // Input indices - - for (i0 = 0; i0 < osz0; i0++) { - iout[0] = i0; - iin[0] = i0; - for (i1 = 0; i1 < osz1; i1++) { - iout[1] = i1; - iin[1] = i1; - for (i2 = 0; i2 < osz2; i2++) { - iout[2] = i2; - iin[2] = i2; - for (i3 = 0; i3 < osz3; i3++) { - iout[3] = i3; - iin[3] = i3; - - // set the indices for the upsampled dimensions - iin[xDim] = iout[xDim] / dW; - iin[yDim] = iout[yDim] / dH; - - idst = i0*os[0] + i1*os[1] + i2*os[2]; - isrc = iin[0]*is[0] + iin[1]*is[1] + iin[2]*is[2]; - if (idim > 3) { - idst += i3*os[3]; - isrc += iin[3]*is[3]; - } - - pout[idst] = pin[isrc]; - } - } - } - } -} - -void THNN_(SpatialUpSamplingNearest_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int scale_factor) -{ - THNN_(SpatialUpSamplingNearest_shapeCheck)(input, gradOutput, scale_factor); - THTensor_(resizeAs)(gradInput, input); - - int dW = scale_factor; - int dH = scale_factor; - int xDim = gradInput->nDimension-2; - int yDim = gradInput->nDimension-1; - - // dims - int idim = gradInput->nDimension; // Guaranteed to be between 3 and 5 - int isz0 = gradInput->size[0]; - int isz1 = gradInput->size[1]; - int isz2 = gradInput->size[2]; - int isz3 = 1; - if (idim > 3) { - isz3 = gradInput->size[3]; - } - - // get strides - long *is = gradInput->stride; - long *os = gradOutput->stride; - - // get raw pointers - real *pin = THTensor_(data)(gradInput); - real *pout = THTensor_(data)(gradOutput); - - // perform the upsampling - int i0, i1, i2, i3, isrc, idst, x, y; - int iin[4]; // Input indices - int iout[4]; // Output indices - - THTensor_(zero)(gradInput); - - for (i0 = 0; i0 < isz0; i0++) { - iin[0] = i0; - iout[0] = i0; - for (i1 = 0; i1 < isz1; i1++) { - iin[1] = i1; - iout[1] = i1; - for (i2 = 0; i2 < isz2; i2++) { - iin[2] = i2; - iout[2] = i2; - for (i3 = 0; i3 < isz3; i3++) { - iin[3] = i3; - iout[3] = i3; - - idst = i0*is[0] + i1*is[1] + i2*is[2]; - if (idim > 3) { - idst += i3*is[3]; - } - - // Now accumulate the gradients from gradOutput - for (y = 0; y < dH; y++) { - for (x = 0; x < dW; x++) { - iout[xDim] = dW * iin[xDim] + x; - iout[yDim] = dH * iin[yDim] + y; - isrc = iout[0]*os[0] + iout[1]*os[1] + iout[2]*os[2]; - if (idim > 3) { - isrc += iout[3]*os[3]; - } - pin[idst] += pout[isrc]; - } - } - } - } - } - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/Sqrt.c b/contrib/lua-torch/nn/lib/THNN/generic/Sqrt.c deleted file mode 100644 index 174884e34a..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/Sqrt.c +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/Sqrt.c" -#else - -void THNN_(Sqrt_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal eps_) -{ - real eps = TH_CONVERT_ACCREAL_TO_REAL(eps_); - THTensor_(resizeAs)(output, input); - THTensor_(sqrt)(output, input); -} - -void THNN_(Sqrt_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output) -{ - THNN_CHECK_SHAPE(output, gradOutput); - THTensor_(resizeAs)(gradInput, input); - - if (output->nDimension == 1 || - !THTensor_(isContiguous)(output) || - !THTensor_(isContiguous)(gradOutput) || - !THTensor_(isContiguous)(gradInput)) - { - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, - *gradInput_data = (*output_data == 0.0) ? 
0.0 : (0.5 * (*gradOutput_data / *output_data)); - ); - } - else - { - real *gradOutput_data = THTensor_(data)(gradOutput); - real *gradInput_data = THTensor_(data)(gradInput); - real *output_data = THTensor_(data)(output); - long i; -#pragma omp parallel for private(i) - for(i = 0; i < THTensor_(nElement)(output); i++) - { - if (output_data[i] == 0.0) - gradInput_data[i] = 0.0; - else - gradInput_data[i] = 0.5 * (gradOutput_data[i] / output_data[i]); - } - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/Square.c b/contrib/lua-torch/nn/lib/THNN/generic/Square.c deleted file mode 100644 index aad0a911c5..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/Square.c +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/Square.c" -#else - -void THNN_(Square_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output) -{ - THTensor_(resizeAs)(output, input); - - if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output)) - { - TH_TENSOR_APPLY2(real, output, real, input, - *output_data = (*input_data) * (*input_data); - ); - } - else - { - real *output_data = THTensor_(data)(output); - real *input_data = THTensor_(data)(input); - long i; -#pragma omp parallel for private(i) - for (i = 0; i < THTensor_(nElement)(input); i++) - output_data[i] = input_data[i]*input_data[i]; - } -} - -void THNN_(Square_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput) -{ - THNN_CHECK_SHAPE(input, gradOutput); - THTensor_(resizeAs)(gradInput, input); - - if (input->nDimension == 1 || - !THTensor_(isContiguous)(input) || - !THTensor_(isContiguous)(gradOutput) || - !THTensor_(isContiguous)(gradInput)) - { - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, - *gradInput_data = 2.0 * (*gradOutput_data) * (*input_data); - ); - } - else - { - real *gradOutput_data = THTensor_(data)(gradOutput); - real *gradInput_data = THTensor_(data)(gradInput); - real *input_data = THTensor_(data)(input); - long i; -#pragma omp parallel for private(i) - for (i = 0; i < THTensor_(nElement)(gradInput); i++) - gradInput_data[i] = 2.0 * gradOutput_data[i] * input_data[i]; - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/THNN.h b/contrib/lua-torch/nn/lib/THNN/generic/THNN.h deleted file mode 100644 index 76a28eb2d7..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/THNN.h +++ /dev/null @@ -1,1501 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THNN.h" -#else - -TH_API void THNN_(Abs_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *output); // [OUT] Abs output -TH_API void THNN_(Abs_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *gradOutput, // gradient w.r.t. output - THTensor *gradInput); // [OUT] gradient w.r.t. input - -TH_API void THNN_(AbsCriterion_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *target, // tensor with target values - THTensor *output, // [OUT] a one-element tensor with loss - bool sizeAverage); // if true, the loss will be divided by batch size -TH_API void THNN_(AbsCriterion_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *target, // tensor with target values - THTensor *gradInput, // [OUT] gradient w.r.t. 
input - bool sizeAverage); // if true, the gradient will be normalized by batch size - -TH_API void THNN_(BCECriterion_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *output, - bool sizeAverage, - THTensor *weights); // [OPTIONAL] -TH_API void THNN_(BCECriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *gradInput, - bool sizeAverage, - THTensor *weights); // [OPTIONAL] - -TH_API void THNN_(ClassNLLCriterion_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor (1D/2D) - THIndexTensor *target, // tensor containing indexes of target classes - THTensor *output, // [OUT] a one-element tensor with loss - bool sizeAverage, // if true, the loss will be normalized by batch size and class weights - THTensor *weights, // [OPTIONAL] class weights - THTensor *total_weight, // [BUFFER] - long ignore_index); // target index to ignore (loss = 0, gradInput = 0) -TH_API void THNN_(ClassNLLCriterion_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor (1D/2D) - THIndexTensor *target, // tensor containing indexes of target classes - THTensor *gradInput, // [OUT] gradient w.r.t. input - bool sizeAverage, // if true, the loss will be normalized by batch size and class weights - THTensor *weights, // [OPTIONAL] class weights - THTensor *total_weight, // [BUFFER] - long ignore_index); // target index to ignore (loss = 0, gradInput = 0) - -TH_API void THNN_(SpatialClassNLLCriterion_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor (4D) - THIndexTensor *target, // tensor containing indexes of target classes (3D) - THTensor *output, // [OUT] a one-element tensor with loss - bool sizeAverage, // if true, the loss will be normalized by batch size and class weights - THTensor *weights, // [OPTIONAL] class weights - THTensor *total_weight); // [BUFFER] -TH_API void THNN_(SpatialClassNLLCriterion_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor (4D) - THIndexTensor *target, // tensor containing indexes of target classes (3D) - THTensor *gradInput, // [OUT] gradient w.r.t. input - bool sizeAverage, // if true, the loss will be normalized by batch size and class weights - THTensor *weights, // [OPTIONAL] class weights - THTensor *total_weight); // [BUFFER] - -TH_API void THNN_(ELU_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *output, // [OUT] ELU output - accreal alpha, // an ELU parameter (as in paper) - bool inplace); // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated) -TH_API void THNN_(ELU_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *gradOutput, // gradient w.r.t. output - THTensor *gradInput, // [OUT] gradient w.r.t. 
input - THTensor *output, // output from a forward pass - accreal alpha, // an ELU parameter (as in paper) - bool inplace); // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated) - -TH_API void THNN_(DistKLDivCriterion_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *target, // target tensor - THTensor *output, // [OUT] a one-element tensor containing the loss - bool sizeAverage); // if true, the loss will be normalized **by total number of elements** -TH_API void THNN_(DistKLDivCriterion_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *target, // target tensor - THTensor *gradInput, // [OUT] gradient w.r.t. input - bool sizeAverage); // if true, the loss will be normalized **by total number of elements** - -TH_API void THNN_(GatedLinear_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *output, // [OUT] output tensor, half size of input along dimension dim - int dim); // dimension for halving operation -TH_API void THNN_(GatedLinear_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *gradOutput, // gradient w.r.t. module's output - THTensor *gradInput, // [OUT] gradient w.r.t. input - int dim); // dimension for halving operation - -// HardShrink outputs 0 on the interval (-lambda; lambda) and the original value otherwise. -TH_API void THNN_(HardShrink_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *output, // [OUT] output tensor - accreal lambda); // HardShrink parameter -TH_API void THNN_(HardShrink_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *gradOutput, // gradient w.r.t. module's output - THTensor *gradInput, // [OUT] gradient w.r.t. input - accreal lambda); // HardShrink parameter - -// HardTanh clamps the values to the interval [min_val; max_val]. -TH_API void THNN_(HardTanh_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *output, // [OUT] output tensor - accreal min_val, // lower threshold - accreal max_val, // upper threshold - bool inplace); -TH_API void THNN_(HardTanh_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *gradOutput, // gradient w.r.t. module's output - THTensor *gradInput, // [OUT] gradient w.r.t. the input - accreal min_val, // lower threshold - accreal max_val, // upper threshold - bool inplace); - -TH_API void THNN_(L1Cost_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *output); // [OUT] output tensor -TH_API void THNN_(L1Cost_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *gradOutput, // [OPTIONAL] gradient w.r.t. module's output - THTensor *gradInput); // [OUT] gradient w.r.t. the input - -TH_API void THNN_(LeakyReLU_updateOutput)( - THNNState *state, // library's state - THTensor *input, // [MODIFIED] input tensor - THTensor *output, // [OUT] output tensor - accreal negval, // negative part slope - bool inplace); // if true, modifies the input tensor and sets the output tensor on it (no additional memory is allocated) -TH_API void THNN_(LeakyReLU_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *gradOutput, // [MODIFIED] gradient w.r.t.
module's output - THTensor *gradInput, // [OUT] gradient w.r.t. the input - accreal negval, // negative part slope - bool inplace); // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated) - -TH_API void THNN_(GRUFused_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *hidden, - THTensor *bias1, // [OPTIONAL] - THTensor *bias2, // [OPTIONAL] - THTensor *hx, - THTensor *output, - THTensor *storage); -TH_API void THNN_(GRUFused_updateGradInput)( - THNNState *state, - THTensor *gradInInput, - THTensor *gradInHidden, - THTensor *gradOutput, - THTensor *gradInputHx, - THTensor *storage); - -TH_API void THNN_(LSTMFused_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *hidden, - THTensor *bias1, // [OPTIONAL] - THTensor *bias2, // [OPTIONAL] - THTensor *cell, - THTensor *output, - THTensor *outputCell); -TH_API void THNN_(LSTMFused_updateGradInput)( - THNNState *state, - THTensor *storage, - THTensor *gradInGates, - THTensor *cx, - THTensor *cy, - THTensor *gradOutput, - THTensor *gradOutputCell, - THTensor *gradInputCx); - -TH_API void THNN_(LogSigmoid_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *output, // output tensor - THTensor *buffer); // [BUFFER] -TH_API void THNN_(LogSigmoid_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input - THTensor *gradOutput, // gradient w.r.t. module's output - THTensor *gradInput, // [OUT] gradient w.r.t. input - THTensor *buffer); // [BUFFER] - -TH_API void THNN_(LogSoftMax_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *output); // [OUT] output tensor -TH_API void THNN_(LogSoftMax_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *gradOutput, // gradient w.r.t. module's output - THTensor *gradInput, // [OUT] gradient w.r.t. input - THTensor *output); // module's output - -TH_API void THNN_(LookupTable_accGradParameters)( - THNNState *state, - THIndexTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THIntegerTensor *count, - THTensor *sorted, // [OPTIONAL] - THIndexTensor *indices, // [OPTIONAL] - bool scaleGradByFreq, - int paddingValue, - accreal scale); - -TH_API void THNN_(LookupTable_renorm)( - THNNState *state, // library's state - THIndexTensor *idx, // vector containing row indices (modified in function) - THTensor *weight, // 2D tensor whose rows will be renormalized - accreal maxNorm, // maximum norm - accreal normType); // the norm type (e.g., normType=2, then it's 2-norm) - -TH_API void THNN_(MarginCriterion_updateOutput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *target, // target tensor (should contain only 1s and -1s) - THTensor *output, // [OUT] a one-element tensor containing the loss - bool sizeAverage, // if true, the loss is normalized by **total number of elements** - accreal margin); // a margin that is required for the loss to be 0 - -TH_API void THNN_(MarginCriterion_updateGradInput)( - THNNState *state, // library's state - THTensor *input, // input tensor - THTensor *target, // target tensor (should contain only 1s and -1s) - THTensor *gradInput, // [OUT] gradient w.r.t.
module's input - bool sizeAverage, // if true, the gradient is normalized by **total number of elements** - accreal margin); // a margin that is required for the loss to be 0 - -TH_API void THNN_(SoftMarginCriterion_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *output, - bool sizeAverage); - -TH_API void THNN_(SoftMarginCriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *gradInput, - bool sizeAverage); - -TH_API void THNN_(MSECriterion_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *output, - bool sizeAverage); -TH_API void THNN_(MSECriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *gradInput, - bool sizeAverage); - -TH_API void THNN_(MultiLabelMarginCriterion_updateOutput)( - THNNState *state, - THTensor *input, - THIndexTensor *target, - THTensor *output, - THTensor *isTarget, - bool sizeAverage); -TH_API void THNN_(MultiLabelMarginCriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THIndexTensor *target, - THTensor *gradInput, - THTensor *isTarget, - bool sizeAverage); - -TH_API void THNN_(MultiMarginCriterion_updateOutput)( - THNNState *state, - THTensor *input, - THIndexTensor *target, - THTensor *output, - bool sizeAverage, - int p, - THTensor* weights, // [OPTIONAL] - accreal margin); -TH_API void THNN_(MultiMarginCriterion_updateGradInput)( - THNNState *state, - THTensor *input, - THIndexTensor *target, - THTensor *gradInput, - bool sizeAverage, - int p, - THTensor *weights, // [OPTIONAL] - accreal margin); - -TH_API void THNN_(PReLU_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THIndex_t nOutputPlane); -TH_API void THNN_(PReLU_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THIndex_t nOutputPlane); -TH_API void THNN_(PReLU_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *gradWeight, - THTensor *gradWeightBuf, - THTensor *gradWeightBuf2, - THIndex_t nOutputPlane, - accreal scale); - -TH_API void THNN_(Linear_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *addBuffer); -TH_API void THNN_(Linear_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight); -TH_API void THNN_(Linear_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *bias, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *addBuffer, - accreal scale); - -TH_API void THNN_(RReLU_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *noise, - accreal lower, - accreal upper, - bool train, - bool inplace, - THGenerator *generator); -TH_API void THNN_(RReLU_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *noise, - accreal lower, - accreal upper, - bool train, - bool inplace); - -TH_API void THNN_(Sigmoid_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output); -TH_API void THNN_(Sigmoid_updateGradInput)( - THNNState *state, - THTensor *input, // [OPTIONAL] - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output); - -TH_API void THNN_(SmoothL1Criterion_updateOutput)( - 
THNNState *state, - THTensor *input, - THTensor *target, - THTensor *output, - bool sizeAverage); -TH_API void THNN_(SmoothL1Criterion_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *target, - THTensor *gradInput, - bool sizeAverage); - -TH_API void THNN_(SoftMax_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output); -TH_API void THNN_(SoftMax_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output); - -TH_API void THNN_(SoftPlus_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal beta, - accreal threshold); -TH_API void THNN_(SoftPlus_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output, - accreal beta, - accreal threshold); - -TH_API void THNN_(SoftShrink_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal lambda); -TH_API void THNN_(SoftShrink_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - accreal lambda); - - -TH_API void THNN_(IndexLinear_updateOutput)( - THNNState *state, - THIndexTensor *keys, - long keysOffset, - THTensor *values, - THIndexTensor *sizes, - THIndexTensor *cumSumSizes, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *normalizedValues, - int train); -TH_API void THNN_(IndexLinear_accGradParameters)( - THNNState *state, - THIndexTensor *keys, - long keysOffset, - THTensor *values, - THIndexTensor *sizes, - THIndexTensor *cumSumSizes, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *weight, - THTensor *bias, - THTensor* valuesBuffer, - accreal weightDecay, - accreal scale); -TH_API void THNN_(IndexLinear_accUpdateGradParameters)( - THNNState *state, - THIndexTensor *keys, - long keysOffset, - THTensor *values, - THIndexTensor *sizes, - THIndexTensor *cumSumSizes, - THTensor *gradOutput, - THTensor *weight, - THTensor *bias, - accreal weightDecay, - accreal scale); -TH_API void THNN_(IndexLinear_updateParameters)( - THNNState *state, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *weight, - THTensor *bias, - THIndexTensor *runningKeys, - THIndexTensor *cumSumSizes, - long keysOffset, - accreal weightDecay, - accreal learningRate); - -TH_API void THNN_(SparseLinear_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias); -TH_API void THNN_(SparseLinear_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *weight, - THTensor *bias, - accreal weightDecay, - accreal scale); -TH_API void THNN_(SparseLinear_zeroGradParameters)( - THNNState *state, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *lastInput); -TH_API void THNN_(SparseLinear_updateParameters)( - THNNState *state, - THTensor *weight, - THTensor *bias, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *lastInput, - accreal learningRate); -TH_API void THNN_(SparseLinear_legacyUpdateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias); -TH_API void THNN_(SparseLinear_legacyAccGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *weight, - THTensor *bias, - accreal weightDecay, - accreal scale); -TH_API void THNN_(SparseLinear_legacyZeroGradParameters)( - THNNState 
*state, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *lastInput); -TH_API void THNN_(SparseLinear_legacyUpdateParameters)( - THNNState *state, - THTensor *weight, - THTensor *bias, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *lastInput, - accreal learningRate); - -TH_API void THNN_(Sqrt_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal eps); -TH_API void THNN_(Sqrt_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output); - -TH_API void THNN_(Square_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output); -TH_API void THNN_(Square_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput); - -TH_API void THNN_(Tanh_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output); -TH_API void THNN_(Tanh_updateGradInput)( - THNNState *state, - THTensor *input, // [OPTIONAL] - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output); - -TH_API void THNN_(Threshold_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal threshold, - accreal val, - bool inplace); -TH_API void THNN_(Threshold_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - accreal threshold, - accreal val, - bool inplace); - -TH_API void THNN_(TemporalConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - int kW, int dW, - int inputFrameSize, - int outputFrameSize); -TH_API void THNN_(TemporalConvolution_updateGradInput)( - THNNState* state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - int kW, int dW); -TH_API void THNN_(TemporalConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - int kW, int dW, - accreal scale); -TH_API void THNN_(TemporalMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int kW, int dW); -TH_API void THNN_(TemporalMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int kW, int dW); -TH_API void THNN_(TemporalSubSampling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - int kW, int dW, - int inputFrameSize); -TH_API void THNN_(TemporalSubSampling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - int kW, int dW); -TH_API void THNN_(TemporalSubSampling_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - int kW, int dW, - accreal scale); - -TH_API void THNN_(TemporalRowConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, - THTensor *fgradInput, - int kW, - int dW, - int padW, - bool featFirst); -TH_API void THNN_(TemporalRowConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - THTensor *fgradInput, - int kW, - int dW, - int padW, - bool featFirst); -TH_API void THNN_(TemporalRowConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor 
*gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, - THTensor *fgradInput, - int kW, - int dW, - int padW, - bool featFirst, - accreal scale); - -TH_API void THNN_(BatchNormalization_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, // [OPTIONAL] - THTensor *bias, // [OPTIONAL] - THTensor *running_mean, - THTensor *running_var, - THTensor *save_mean, - THTensor *save_std, - bool train, - double momentum, - double eps); -TH_API void THNN_(BatchNormalization_backward)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, // [OPTIONAL] - THTensor *gradWeight, // [OPTIONAL] - THTensor *gradBias, // [OPTIONAL] - THTensor *weight, // [OPTIONAL] - THTensor *running_mean, - THTensor *running_var, - THTensor *save_mean, - THTensor *save_std, - bool train, - double scale, - double eps); - -TH_API void THNN_(SpatialConvolutionMap_updateOutput)( - THNNState *state, // library state - THTensor *input, // input tensor - THTensor *output, // [OUT] convolution output - THTensor *weight, // 3D weight tensor (connTable:size(1) x kH x kW) - THTensor *bias, // 1D bias tensor (nOutputPlane) - THTensor *connTable, // connection table - int nInputPlane, // number of input planes - int nOutputPlane, // number of output planes - int dW, int dH); // stride -TH_API void THNN_(SpatialConvolutionMap_updateGradInput)( - THNNState *state, // library state - THTensor *input, // input tensor - THTensor *gradOutput, // gradient w.r.t. output - THTensor *gradInput, // [OUT] gradient w.r.t. input - THTensor *weight, // 3D weight tensor (connTable:size(1) x kH x kW) - THTensor *bias, // 1D bias tensor (nOutputPlane) - THTensor *connTable, // connection table - int nInputPlane, // number of input planes - int nOutputPlane, // number of output planes - int dW, int dH); // stride -TH_API void THNN_(SpatialConvolutionMap_accGradParameters)( - THNNState *state, // library state - THTensor *input, // input tensor - THTensor *gradOutput, // gradient w.r.t. 
output - THTensor *gradWeight, // 3D gradWeight tensor (connTable:size(1) x kH x kW) - THTensor *gradBias, // 1D gradBias tensor (nOutputPlane) - THTensor *connTable, // connection table - int nInputPlane, // number of input planes - int nOutputPlane, // number of output planes - int dW, int dH, // stride - accreal scale); // scaling factor - -TH_API void THNN_(SpatialConvolutionMM_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, // [OPTIONAL] - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH); -TH_API void THNN_(SpatialConvolutionMM_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH); -TH_API void THNN_(SpatialConvolutionMM_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, // [OPTIONAL] - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH, - accreal scale); - -TH_API void THNN_(SpatialDepthWiseConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, // [OPTIONAL] - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH); -TH_API void THNN_(SpatialDepthWiseConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH); -TH_API void THNN_(SpatialDepthWiseConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, // [OPTIONAL] - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH, - accreal scale); - -TH_API void THNN_(SpatialConvolutionLocal_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH, - long inputWidth, long inputHeight, - long outputWidth, long outputHeight); -TH_API void THNN_(SpatialConvolutionLocal_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH, - long inputWidth, long inputHeight, - long outputWidth, long outputHeight); -TH_API void THNN_(SpatialConvolutionLocal_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, - THTensor *fgradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH, - long inputWidth, long inputHeight, - long outputWidth, long outputHeight, - accreal scale); - -TH_API void THNN_(SpatialAdaptiveMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int owidth, int oheight); -TH_API void THNN_(SpatialAdaptiveMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices); - -TH_API void THNN_(SpatialAdaptiveAveragePooling_updateOutput)( - THNNState *state, - THTensor *input, - 
THTensor *output, - int owidth, int oheight); -TH_API void THNN_(SpatialAdaptiveAveragePooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput); - -TH_API void THNN_(SpatialAveragePooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int kW, int kH, - int dW, int dH, - int padW, int padH, - bool ceil_mode, - bool count_include_pad); -TH_API void THNN_(SpatialAveragePooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int kW, int kH, - int dW, int dH, - int padW, int padH, - bool ceil_mode, - bool count_include_pad); - -TH_API void THNN_(SpatialFractionalMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int outputW, int outputH, - int poolSizeW, int poolSizeH, - THIndexTensor *indices, - THTensor *randomSamples); -TH_API void THNN_(SpatialFractionalMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int outputW, int outputH, - int poolSizeW, int poolSizeH, - THIndexTensor *indices); - -TH_API void THNN_(SpatialFullConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, // [OPTIONAL] - THTensor *columns, - THTensor *ones, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int adjW, int adjH); -TH_API void THNN_(SpatialFullConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *gradColumns, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int adjW, int adjH); -TH_API void THNN_(SpatialFullConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, // [OPTIONAL] - THTensor *columns, - THTensor *ones, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int adjW, int adjH, - accreal scale); - -TH_API void THNN_(SpatialFullConvolutionMap_updateOutput)( - THNNState *state, // library state - THTensor *input, // input tensor - THTensor *output, // [OUT] convolution output - THTensor *weight, // 3D weight tensor (connTable:size(1) x kH x kW) - THTensor *bias, // 1D bias tensor (nOutputPlane) - THTensor *connTable, // connection table - int nInputPlane, // number of input planes - int nOutputPlane, // number of output planes - int dW, int dH); // stride -TH_API void THNN_(SpatialFullConvolutionMap_updateGradInput)( - THNNState *state, // library state - THTensor *input, // input tensor - THTensor *gradOutput, // gradient w.r.t. output - THTensor *gradInput, // [OUT] gradient w.r.t. input - THTensor *weight, // 3D weight tensor (connTable:size(1) x kH x kW) - THTensor *bias, // 1D bias tensor (nOutputPlane) - THTensor *connTable, // connection table - int nInputPlane, // number of input planes - int nOutputPlane, // number of output planes - int dW, int dH); // stride -TH_API void THNN_(SpatialFullConvolutionMap_accGradParameters)( - THNNState *state, // library state - THTensor *input, // input tensor - THTensor *gradOutput, // gradient w.r.t. 
output - THTensor *gradWeight, // 3D gradWeight tensor (connTable:size(1) x kH x kW) - THTensor *gradBias, // 1D gradBias tensor (nOutputPlane) - THTensor *connTable, // connection table - int nInputPlane, // number of input planes - int nOutputPlane, // number of output planes - int dW, int dH, // stride - accreal scale); // scaling factor - -TH_API void THNN_(SpatialDilatedConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, // [OPTIONAL] - THTensor *columns, - THTensor *ones, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int dilationW, int dilationH); - -TH_API void THNN_(SpatialDilatedConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *gradColumns, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int dilationW, int dilationH); - -TH_API void THNN_(SpatialDilatedConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, // [OPTIONAL] - THTensor *columns, - THTensor *ones, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int dilationW, int dilationH, - accreal scale); - -TH_API void THNN_(SpatialMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int kW, int kH, - int dW, int dH, - int padW, int padH, - bool ceil_mode); -TH_API void THNN_(SpatialMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int kW, int kH, - int dW, int dH, - int padW, int padH, - bool ceil_mode); - -TH_API void THNN_(SpatialDilatedMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int dilationW, int dilationH, - bool ceil_mode); -TH_API void THNN_(SpatialDilatedMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int dilationW, int dilationH, - bool ceil_mode); - -TH_API void THNN_(SpatialMaxUnpooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int owidth, int oheight); -TH_API void THNN_(SpatialMaxUnpooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int owidth, int oheight); - -TH_API void THNN_(SpatialSubSampling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - int kW, int kH, - int dW, int dH); -TH_API void THNN_(SpatialSubSampling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - int kW, int kH, - int dW, int dH); -TH_API void THNN_(SpatialSubSampling_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - int kW, int kH, - int dW, int dH, - accreal scale); - -TH_API void THNN_(SpatialUpSamplingNearest_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int scale_factor); -TH_API void THNN_(SpatialUpSamplingNearest_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int scale_factor); - 
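/*
 * For reference while reading the SpatialUpSamplingBilinear entry points
 * declared just below (their deleted implementation appears earlier in this
 * patch): each output pixel (h2, w2) is mapped back to fractional input
 * coordinates and blended from its four nearest input pixels. A minimal
 * single-channel sketch of that forward pass, using plain float buffers
 * instead of THTensor -- the function name and types here are illustrative
 * only, not part of the THNN API:
 */
static void upsample_bilinear_2d_sketch(const float *in, int inH, int inW,
                                        float *out, int outH, int outW)
{
  /* "align corners" ratios, exactly as computed in the deleted kernel */
  const float rh = (outH > 1) ? (float)(inH - 1) / (outH - 1) : 0.f;
  const float rw = (outW > 1) ? (float)(inW - 1) / (outW - 1) : 0.f;
  for (int h2 = 0; h2 < outH; ++h2) {
    const float h1r = rh * h2;
    const int   h1  = (int)h1r;                 /* top source row          */
    const int   h1p = (h1 < inH - 1) ? 1 : 0;   /* row step, clamped at edge */
    const float hl1 = h1r - h1;                 /* weight of bottom row    */
    const float hl0 = 1.f - hl1;                /* weight of top row       */
    for (int w2 = 0; w2 < outW; ++w2) {
      const float w1r = rw * w2;
      const int   w1  = (int)w1r;               /* left source column      */
      const int   w1p = (w1 < inW - 1) ? 1 : 0; /* col step, clamped at edge */
      const float wl1 = w1r - w1;               /* weight of right column  */
      const float wl0 = 1.f - wl1;              /* weight of left column   */
      const float *p = &in[h1 * inW + w1];
      out[h2 * outW + w2] =
          hl0 * (wl0 * p[0]         + wl1 * p[w1p]) +
          hl1 * (wl0 * p[h1p * inW] + wl1 * p[h1p * inW + w1p]);
    }
  }
}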
-TH_API void THNN_(SpatialUpSamplingBilinear_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int outputHeight, - int outputWidth); -TH_API void THNN_(SpatialUpSamplingBilinear_updateGradInput)( - THNNState *state, - THTensor *gradOutput, - THTensor *gradInput, - int nbatch, - int nchannels, - int inputHeight, - int inputWidth, - int outputHeight, - int outputWidth); - -TH_API void THNN_(unfolded_acc)( - THTensor *finput, - THTensor *input, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int nInputPlane, - int inputWidth, int inputHeight, - int outputWidth, int outputHeight); -TH_API void THNN_(unfolded_copy)( - THTensor *finput, - THTensor *input, - int kW, int kH, - int dW, int dH, - int padW, int padH, - int nInputPlane, - int inputWidth, int inputHeight, - int outputWidth, int outputHeight); - -TH_API void THNN_(VolumetricAveragePooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int kT, int kW, int kH, - int dT, int dW, int dH); -TH_API void THNN_(VolumetricAveragePooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int kT, int kW, int kH, - int dT, int dW, int dH); - -TH_API void THNN_(VolumetricConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, // [OPTIONAL] - THTensor *finput, - THTensor *fgradInput, - int dT, int dW, int dH, - int pT, int pW, int pH); -TH_API void THNN_(VolumetricConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - int dT, int dW, int dH, - int pT, int pW, int pH); -TH_API void THNN_(VolumetricConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, // [OPTIONAL] - THTensor *finput, - THTensor *fgradInput, - int dT, int dW, int dH, - int pT, int pW, int pH, - accreal scale); - -TH_API void THNN_(VolumetricConvolutionMM_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, // [OPTIONAL] - THTensor *finput, - int kT, int kW, int kH, - int dT, int dW, int dH, - int pT, int pW, int pH); -TH_API void THNN_(VolumetricConvolutionMM_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - THTensor *fgradInput, - int kT, int kW, int kH, - int dT, int dW, int dH, - int pT, int pW, int pH); -TH_API void THNN_(VolumetricConvolutionMM_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, // [OPTIONAL] - THTensor *finput, - int kT, int kW, int kH, - int dT, int dW, int dH, - int pT, int pW, int pH, - accreal scale); - -TH_API void THNN_(VolumetricFractionalMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int outputT, int outputW, int outputH, - int poolSizeT, int poolSizeW, int poolSizeH, - THIndexTensor *indices, - THTensor *randomSamples); -TH_API void THNN_(VolumetricFractionalMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int outputT, int outputW, int outputH, - int poolSizeT, int poolSizeW, int poolSizeH, - THIndexTensor *indices); - -TH_API void THNN_(VolumetricFullConvolution_updateOutput)( - THNNState *state, // library state - THTensor *input, // 4D or 5D 
(batch) tensor - THTensor *output, // [OUT] volumetric convolution output - THTensor *weight, // weight tensor (nInputPlane x nOutputPlane x kT x kH x kW) - THTensor *bias, // [OPTIONAL] gradBias tensor (nOutputPlane) - THTensor *finput, // [OUT] internal columns buffer - THTensor *fgradInput, // [OUT] internal ones buffer - int dT, int dW, int dH, // stride of the convolution - int pT, int pW, int pH, // padding - int aT, int aW, int aH); // extra output adjustment -TH_API void THNN_(VolumetricFullConvolution_updateGradInput)( - THNNState *state, // library state - THTensor *input, // 4D or 5D (batch) tensor - THTensor *gradOutput, // gradient w.r.t. output - THTensor *gradInput, // [OUT] gradient w.r.t. input - THTensor *weight, // weight tensor (nInputPlane x nOutputPlane x kT x kH x kW) - THTensor *finput, // internal columns buffer - THTensor *fgradInput, // internal ones buffer - int dT, int dW, int dH, // stride - int pT, int pW, int pH, // padding - int aT, int aW, int aH); // extra output adjustment -TH_API void THNN_(VolumetricFullConvolution_accGradParameters)( - THNNState *state, // library state - THTensor *input, // 4D or 5D (batch) tensor - THTensor *gradOutput, // gradient w.r.t. output - THTensor *gradWeight, // gradWeight tensor (nInputPlane x nOutputPlane x kT x kH x kW) - THTensor *gradBias, // [OPTIONAL] gradBias tensor (nOutputPlane) - THTensor *finput, // internal columns buffer - THTensor *fgradInput, // internal ones buffer - int dT, int dW, int dH, // stride - int pT, int pW, int pH, // padding - int aT, int aW, int aH, // extra output adjustment - accreal scale); // scaling factor - -TH_API void THNN_(VolumetricDilatedConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, // [OPTIONAL] - THTensor *columns, - THTensor *ones, - int kT, int kW, int kH, - int dT, int dW, int dH, - int padT, int padW, int padH, - int dilationT, int dilationW, int dilationH); - -TH_API void THNN_(VolumetricDilatedConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *gradColumns, - int kT, int kW, int kH, - int dT, int dW, int dH, - int padT, int padW, int padH, - int dilationT, int dilationW, int dilationH); - -TH_API void THNN_(VolumetricDilatedConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, // [OPTIONAL] - THTensor *columns, - THTensor *ones, - int kT, int kW, int kH, - int dT, int dW, int dH, - int padT, int padW, int padH, - int dilationT, int dilationW, int dilationH, - accreal scale); - -TH_API void THNN_(VolumetricMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int kT, int kW, int kH, - int dT, int dW, int dH, - int pT, int pW, int pH, - bool ceilMode); -TH_API void THNN_(VolumetricMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int kT, int kW, int kH, - int dT, int dW, int dH, - int pT, int pW, int pH, - bool ceilMode); - -TH_API void THNN_(VolumetricDilatedMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int kT, int kW, int kH, - int dT, int dW, int dH, - int pT, int pW, int pH, - int dilationT, int dilationW, int dilationH, - bool ceilMode); -TH_API void THNN_(VolumetricDilatedMaxPooling_updateGradInput)( 
- THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int kT, int kW, int kH, - int dT, int dW, int dH, - int pT, int pW, int pH, - int dilationT, int dilationW, int dilationH, - bool ceilMode); - -TH_API void THNN_(VolumetricMaxUnpooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int oT, int oW, int oH, - int dT, int dW, int dH, - int pT, int pW, int pH); -TH_API void THNN_(VolumetricMaxUnpooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int oT, int oW, int oH, - int dT, int dW, int dH, - int pT, int pW, int pH); - -TH_API void THNN_(SpatialReflectionPadding_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int pad_l, int pad_r, - int pad_t, int pad_b); - -TH_API void THNN_(SpatialReflectionPadding_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int pad_l, int pad_r, - int pad_t, int pad_b); - -TH_API void THNN_(SpatialReplicationPadding_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int pad_l, int pad_r, - int pad_t, int pad_b); - -TH_API void THNN_(SpatialReplicationPadding_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int pad_l, int pad_r, - int pad_t, int pad_b); - -TH_API void THNN_(VolumetricReplicationPadding_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int pleft, int pright, - int ptop, int pbottom, - int pfront, int pback); - -TH_API void THNN_(VolumetricReplicationPadding_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int pleft, int pright, - int ptop, int pbottom, - int pfront, int pback); - -TH_API void THNN_(VolumetricUpSamplingNearest_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int scale_factor); -TH_API void THNN_(VolumetricUpSamplingNearest_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int scale_factor); - -TH_API void THNN_(VolumetricUpSamplingTrilinear_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int outputDepth, - int outputHeight, - int outputWidth); -TH_API void THNN_(VolumetricUpSamplingTrilinear_updateGradInput)( - THNNState *state, - THTensor *gradOutput, - THTensor *gradInput, - int nbatch, - int nchannels, - int inputDepth, - int inputHeight, - int inputWidth, - int outputDepth, - int outputHeight, - int outputWidth); - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/Tanh.c b/contrib/lua-torch/nn/lib/THNN/generic/Tanh.c deleted file mode 100644 index ecf0708c20..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/Tanh.c +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/Tanh.c" -#else - -void THNN_(Tanh_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output) -{ - THTensor_(tanh)(output, input); -} - -void THNN_(Tanh_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *output) -{ - THNN_CHECK_SHAPE(output, gradOutput); - THTensor_(resizeAs)(gradInput, output); - - if (output->nDimension == 1 || - !THTensor_(isContiguous)(output) || - !THTensor_(isContiguous)(gradOutput) || - !THTensor_(isContiguous)(gradInput)) - { - TH_TENSOR_APPLY3(real, 
gradInput, real, gradOutput, real, output, - real z = *output_data; \ - *gradInput_data = *gradOutput_data * (1. - z*z); - ); - } - else - { - real* ptr_gradOutput = THTensor_(data)(gradOutput); - real* ptr_gradInput = THTensor_(data)(gradInput); - real* ptr_output = THTensor_(data)(output); - long i; - -#pragma omp parallel for private(i) - for (i = 0; i < THTensor_(nElement)(gradInput); i++) - { - real z = ptr_output[i]; - ptr_gradInput[i] = ptr_gradOutput[i] * (1. - z*z); - } - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/TemporalConvolution.c b/contrib/lua-torch/nn/lib/THNN/generic/TemporalConvolution.c deleted file mode 100644 index 8cfd97d853..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/TemporalConvolution.c +++ /dev/null @@ -1,398 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/TemporalConvolution.c" -#else - -static inline void THNN_(TemporalConvolution_shapeCheck)( - THNNState *state, - THTensor *input, - int kW, - int dW, - int *inputFrameSize) { - - THArgCheck(kW > 0, 9, - "kernel size should be greater than zero, but got kW: %d", kW); - THArgCheck(dW > 0, 11, - "stride should be greater than zero, but got dW: %d", dW); - - int dimS = 0; // sequence dimension - int dimF = 1; // feature dimension - - if (input->nDimension == 3) - { - dimS = 1; - dimF = 2; - } - THNN_ARGCHECK(input->nDimension == 2 || input->nDimension == 3, 2, input, - "2D or 3D (batch mode) tensor expected for input, but got: %s"); - if (inputFrameSize != NULL) { - THArgCheck(input->size[dimF] == *inputFrameSize, 2, - "invalid input frame size. Got: %d, Expected: %d", - input->size[dimF], *inputFrameSize); - } - THArgCheck(input->size[dimS] >= kW, 2, - "input sequence smaller than kernel size. Got: %d, Expected: %d", - input->size[dimS], kW); -} - -void THNN_(TemporalConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - int kW, - int dW, - int inputFrameSize, - int outputFrameSize) -{ - THTensor *outputWindow, *inputWindow; - int nInputFrame, nOutputFrame; - long k, i; - - int dimS = 0; // sequence dimension - int dimF = 1; // feature dimension - - if (input->nDimension == 3) - { - dimS = 1; - dimF = 2; - } - - THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous"); - THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous"); - THNN_(TemporalConvolution_shapeCheck) - (state, input, kW, dW, &inputFrameSize); - input = THTensor_(newContiguous)(input); - outputWindow = THTensor_(new)(); - inputWindow = THTensor_(new)(); - - nInputFrame = input->size[dimS]; - nOutputFrame = (nInputFrame - kW) / dW + 1; - - if (input->nDimension == 2) - { - THTensor_(resize2d)(output, - nOutputFrame, - outputFrameSize); - - /* bias first */ - for(k = 0; k < nOutputFrame; k++) - { - THTensor_(select)(outputWindow, output, 0, k); - THTensor_(copy)(outputWindow, bias); - } - - /* ouch */ - for(k = 0; nOutputFrame > 0; k++) - { - long outputFrameStride = (kW-1)/dW+1; - long inputFrameStride = outputFrameStride*dW; - long nFrame = (nInputFrame-k*dW-kW)/inputFrameStride + 1; - nOutputFrame -= nFrame; - - THTensor_(setStorage2d)(inputWindow, input->storage, - input->storageOffset+k*dW*input->size[1], - nFrame, inputFrameStride*input->size[1], - kW*input->size[1], 1); - - THTensor_(setStorage2d)(outputWindow, output->storage, - output->storageOffset + k*output->size[1], - nFrame, outputFrameStride*output->size[1], - output->size[1], 1); - - THTensor *tweight = THTensor_(new)(); 
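/* A note on the three statements below: tweight is only a fresh tensor
   header, THTensor_(transpose) swaps its strides rather than copying data,
   and THTensor_(addmm)(r, 1, r, 1, m1, m2) computes r = r + m1*m2. For this
   window of frames that means outputWindow += inputWindow * weight^T, i.e.
   each output frame accumulates its kW-frame input slice times the weights,
   on top of the bias copied in above. */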
- THTensor_(transpose)(tweight, weight, 0, 1); - THTensor_(addmm)(outputWindow, 1, outputWindow, 1, inputWindow, tweight); - THTensor_(free)(tweight); - } - } - else - { - THTensor *outputSample = THTensor_(new)(); - THTensor *inputSample = THTensor_(new)(); - int nBatchFrame = input->size[0]; - - THTensor_(resize3d)(output, - nBatchFrame, - nOutputFrame, - outputFrameSize); - - for(i = 0; i < nBatchFrame; i++) - { - THTensor_(select)(outputSample, output, 0, i); - THTensor_(select)(inputSample, input, 0, i); - long nOutputSampleFrame = nOutputFrame; - - /* bias first */ - for(k = 0; k < nOutputFrame; k++) - { - THTensor_(select)(outputWindow, outputSample, 0, k); - THTensor_(copy)(outputWindow, bias); - } - - /* ouch */ - for(k = 0; nOutputSampleFrame > 0; k++) - { - long outputFrameStride = (kW-1)/dW+1; - long inputFrameStride = outputFrameStride*dW; - long nFrame = (nInputFrame-k*dW-kW)/inputFrameStride + 1; - nOutputSampleFrame -= nFrame; - - THTensor_(setStorage2d)(inputWindow, inputSample->storage, - inputSample->storageOffset+k*dW*inputSample->size[1], - nFrame, inputFrameStride*inputSample->size[1], - kW*inputSample->size[1], 1); - - THTensor_(setStorage2d)(outputWindow, outputSample->storage, - outputSample->storageOffset + k*outputSample->size[1], - nFrame, outputFrameStride*outputSample->size[1], - outputSample->size[1], 1); - - THTensor *tweight = THTensor_(new)(); - THTensor_(transpose)(tweight, weight, 0, 1); - THTensor_(addmm)(outputWindow, 1, outputWindow, 1, inputWindow, tweight); - THTensor_(free)(tweight); - } - } - THTensor_(free)(outputSample); - THTensor_(free)(inputSample); - } - - THTensor_(free)(outputWindow); - THTensor_(free)(inputWindow); - THTensor_(free)(input); - -} - -void THNN_(TemporalConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - int kW, - int dW) -{ - long nInputFrame; - long nOutputFrame; - - THTensor *gradOutputWindow; - THTensor *gradInputWindow; - long k, i; - - int dimS = 0; // sequence dimension - int dimF = 1; // feature dimension - - if (gradOutput->nDimension == 3) - { - dimS = 1; - dimF = 2; - } - - THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous"); - THNN_(TemporalConvolution_shapeCheck)( - state, input, kW, dW, NULL); - nInputFrame = input->size[dimS]; - nOutputFrame = gradOutput->size[dimS]; - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - - gradOutputWindow = THTensor_(new)(); - gradInputWindow = THTensor_(new)(); - - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - if (gradOutput->nDimension == 2) - { - /* ouch */ - for(k = 0; nOutputFrame > 0; k++) - { - long outputFrameStride = (kW-1)/dW+1; - long inputFrameStride = outputFrameStride*dW; - long nFrame = (nInputFrame-k*dW-kW)/inputFrameStride + 1; - nOutputFrame -= nFrame; - - THTensor_(setStorage2d)(gradOutputWindow, gradOutput->storage, - gradOutput->storageOffset + k*gradOutput->size[1], - nFrame, outputFrameStride*gradOutput->size[1], - gradOutput->size[1], 1); - - THTensor_(setStorage2d)(gradInputWindow, gradInput->storage, - gradInput->storageOffset+k*dW*gradInput->size[1], - nFrame, inputFrameStride*gradInput->size[1], - kW*gradInput->size[1], 1); - - THTensor_(addmm)(gradInputWindow, 1, gradInputWindow, 1, gradOutputWindow, weight); - } - } - else - { - THTensor *gradOutputSample = THTensor_(new)(); - THTensor *gradInputSample = THTensor_(new)(); - int nBatchFrame = input->size[0]; - - for(i 
= 0; i < nBatchFrame; i++) - { - THTensor_(select)(gradOutputSample, gradOutput, 0, i); - THTensor_(select)(gradInputSample, gradInput, 0, i); - int nOutputSampleFrame = nOutputFrame; - - /* ouch */ - for(k = 0; nOutputSampleFrame > 0; k++) - { - long outputFrameStride = (kW-1)/dW+1; - long inputFrameStride = outputFrameStride*dW; - long nFrame = (nInputFrame-k*dW-kW)/inputFrameStride + 1; - nOutputSampleFrame -= nFrame; - - THTensor_(setStorage2d)(gradOutputWindow, gradOutputSample->storage, - gradOutputSample->storageOffset + k*gradOutputSample->size[1], - nFrame, outputFrameStride*gradOutputSample->size[1], - gradOutputSample->size[1], 1); - - THTensor_(setStorage2d)(gradInputWindow, gradInputSample->storage, - gradInputSample->storageOffset+k*dW*gradInputSample->size[1], - nFrame, inputFrameStride*gradInputSample->size[1], - kW*gradInputSample->size[1], 1); - - THTensor_(addmm)(gradInputWindow, 1, gradInputWindow, 1, gradOutputWindow, weight); - } - } - THTensor_(free)(gradOutputSample); - THTensor_(free)(gradInputSample); - } - - THTensor_(free)(gradOutputWindow); - THTensor_(free)(gradInputWindow); - THTensor_(free)(gradOutput); - THTensor_(free)(input); - -} - -void THNN_(TemporalConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - int kW, - int dW, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - long nInputFrame; - long nOutputFrame; - - THTensor *gradOutputWindow; - THTensor *inputWindow; - long k, i; - - int dimS = 0; // sequence dimension - int dimF = 1; // feature dimension - - if (gradOutput->nDimension == 3) - { - dimS = 1; - dimF = 2; - } - - THNN_(TemporalConvolution_shapeCheck)( - state, input, kW, dW, NULL); - nInputFrame = input->size[dimS]; - nOutputFrame = gradOutput->size[dimS]; - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - gradOutputWindow = THTensor_(new)(); - inputWindow = THTensor_(new)(); - - if (input->nDimension == 2) - { - /* bias first */ - for(k = 0; k < nOutputFrame; k++) - { - THTensor_(select)(gradOutputWindow, gradOutput, 0, k); - THTensor_(cadd)(gradBias, gradBias, scale, gradOutputWindow); - } - - /* ouch */ - for(k = 0; nOutputFrame > 0; k++) - { - long outputFrameStride = (kW-1)/dW+1; - long inputFrameStride = outputFrameStride*dW; - long nFrame = (nInputFrame-k*dW-kW)/inputFrameStride + 1; - nOutputFrame -= nFrame; - - THTensor_(setStorage2d)(inputWindow, input->storage, - input->storageOffset+k*dW*input->size[1], - nFrame, inputFrameStride*input->size[1], - kW*input->size[1], 1); - - THTensor_(setStorage2d)(gradOutputWindow, gradOutput->storage, - gradOutput->storageOffset + k*gradOutput->size[1], - nFrame, outputFrameStride*gradOutput->size[1], - gradOutput->size[1], 1); - - THTensor *tgradOutputWindow = THTensor_(new)(); - THTensor_(transpose)(tgradOutputWindow, gradOutputWindow, 0, 1); - THTensor_(addmm)(gradWeight, 1, gradWeight, scale, tgradOutputWindow, inputWindow); - THTensor_(free)(tgradOutputWindow); - } - } - else - { - THTensor *gradOutputSample = THTensor_(new)(); - THTensor *inputSample = THTensor_(new)(); - int nBatchFrame = input->size[0]; - - for(i = 0; i < nBatchFrame; i++) - { - THTensor_(select)(gradOutputSample, gradOutput, 0, i); - THTensor_(select)(inputSample, input, 0, i); - int nOutputSampleFrame = nOutputFrame; - - /* bias first */ - for(k = 0; k < nOutputFrame; k++) - { - THTensor_(select)(gradOutputWindow, gradOutputSample, 0, k); - 
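/*
 * The surrounding loop accumulates the bias gradient: each output frame
 * contributes its gradient row, i.e. gradBias[j] += scale * sum over
 * frames t of gradOutput[t][j].  A minimal flat-array sketch
 * (hypothetical names, not from the removed file):
 */
static void naive_acc_grad_bias(const float *gradOutput, float *gradBias,
                                long nOutputFrame, long frameSize, float scale)
{
  for (long t = 0; t < nOutputFrame; t++)
    for (long j = 0; j < frameSize; j++)
      gradBias[j] += scale * gradOutput[t * frameSize + j];
}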
THTensor_(cadd)(gradBias, gradBias, scale, gradOutputWindow); - } - - /* ouch */ - for(k = 0; nOutputSampleFrame > 0; k++) - { - long outputFrameStride = (kW-1)/dW+1; - long inputFrameStride = outputFrameStride*dW; - long nFrame = (nInputFrame-k*dW-kW)/inputFrameStride + 1; - nOutputSampleFrame -= nFrame; - - THTensor_(setStorage2d)(inputWindow, inputSample->storage, - inputSample->storageOffset+k*dW*inputSample->size[1], - nFrame, inputFrameStride*inputSample->size[1], - kW*inputSample->size[1], 1); - - THTensor_(setStorage2d)(gradOutputWindow, gradOutputSample->storage, - gradOutputSample->storageOffset + k*gradOutputSample->size[1], - nFrame, outputFrameStride*gradOutputSample->size[1], - gradOutputSample->size[1], 1); - - THTensor *tgradOutputWindow = THTensor_(new)(); - THTensor_(transpose)(tgradOutputWindow, gradOutputWindow, 0, 1); - THTensor_(addmm)(gradWeight, 1, gradWeight, scale, tgradOutputWindow, inputWindow); - THTensor_(free)(tgradOutputWindow); - } - } - THTensor_(free)(gradOutputSample); - THTensor_(free)(inputSample); - } - - THTensor_(free)(gradOutputWindow); - THTensor_(free)(inputWindow); - THTensor_(free)(gradOutput); - THTensor_(free)(input); - -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/TemporalMaxPooling.c b/contrib/lua-torch/nn/lib/THNN/generic/TemporalMaxPooling.c deleted file mode 100644 index 344c1b3fdb..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/TemporalMaxPooling.c +++ /dev/null @@ -1,283 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/TemporalMaxPooling.c" -#else - -static inline void THNN_(TemporalMaxPooling_shapeCheck)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THIndexTensor *indices, - int kW, - int dW) { - long niframe; - long framesize; - long noframe; - - int dimS = 0; // sequence dimension - int dimF = 1; // feature dimension - int ndims = input->nDimension; - - if (input->nDimension == 3) - { - dimS = 1; - dimF = 2; - } - - niframe = input->size[dimS]; - framesize = input->size[dimF]; - noframe = (niframe - kW) / dW + 1; - - THArgCheck(kW > 0, 5, - "kernel size should be greater than zero, but got kW: %d", kW); - THArgCheck(dW > 0, 6, - "stride should be greater than zero, but got dW: %d", dW); - - THNN_ARGCHECK(input->nDimension == 2 || input->nDimension == 3, 2, input, - "2D or 3D (batch mode) tensor expected for input, but got: %s"); - THArgCheck(input->size[dimS] >= kW, 2, - "input sequence smaller than kernel size. 
Got: %d, Expected: %d", - input->size[dimS], kW); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndims, dimS, noframe); - THNN_CHECK_DIM_SIZE(gradOutput, ndims, dimF, framesize) - } - if (indices != NULL) { - THNN_CHECK_DIM_SIZE_INDICES(indices, ndims, dimS, noframe); - THNN_CHECK_DIM_SIZE_INDICES(indices, ndims, dimF, framesize); - } -} - -void THNN_(TemporalMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int kW, - int dW) -{ - long niframe; - long framesize; - long noframe; - - real *input_data; - real *output_data; - THIndex_t *indices_data; - - long t, y; - - int dimS = 0; // sequence dimension - int dimF = 1; // feature dimension - - THNN_(TemporalMaxPooling_shapeCheck)(state, input, NULL, NULL, kW, dW); - - if (input->nDimension == 3) - { - dimS = 1; - dimF = 2; - } - - /* sizes */ - niframe = input->size[dimS]; - framesize = input->size[dimF]; - noframe = (niframe - kW) / dW + 1; - - /* get contiguous input */ - input = THTensor_(newContiguous)(input); - - if (input->nDimension == 2) - { - /* resize output */ - THTensor_(resize2d)(output, noframe, framesize); - - /* indices will contain index locations for each output point */ - THIndexTensor_(resize2d)(indices, noframe, framesize); - - /* get raw pointers */ - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - - for(t = 0; t < noframe; t++) - { - real *ip = input_data + t*framesize*dW; - real *op = output_data + t*framesize; - THIndex_t *xp = indices_data + t*framesize; -#pragma omp parallel for private(y) - for(y = 0; y < framesize; y++) - { - /* compute local max: */ - long maxindex = -1; - real maxval = -THInf; - long x; - for(x = 0; x < kW; x++) - { - real val = ip[x*framesize+y]; - if (val > maxval) - { - maxval = val; - maxindex = x; - } - } - - /* set output to local max */ - op[y] = maxval; - xp[y] = (real)maxindex; - } - } - } - else - { - /* number of batch frames */ - long nbframe = input->size[0]; - long i; - - /* resize output */ - THTensor_(resize3d)(output, nbframe, noframe, framesize); - - /* indices will contain index locations for each output point */ - THIndexTensor_(resize3d)(indices, nbframe, noframe, framesize); - - /* get raw pointers */ - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - - for(i = 0; i < nbframe; i++) - { - real *inputSample_data = input_data + i*niframe*framesize; - real *outputSample_data = output_data + i*noframe*framesize; - THIndex_t *indicesSample_data = indices_data + i*noframe*framesize; - - for(t = 0; t < noframe; t++) - { - real *ip = inputSample_data + t*framesize*dW; - real *op = outputSample_data + t*framesize; - THIndex_t *xp = indicesSample_data + t*framesize; - -#pragma omp parallel for private(y) - for(y = 0; y < framesize; y++) - { - /* compute local max: */ - long maxindex = -1; - real maxval = -THInf; - long x; - for(x = 0; x < kW; x++) - { - real val = ip[x*framesize+y]; - if (val > maxval) - { - maxval = val; - maxindex = x; - } - } - - /* set output to local max */ - op[y] = maxval; - xp[y] = (real)maxindex; - } - } - } - } - - /* cleanup */ - THTensor_(free)(input); - -} - -void THNN_(TemporalMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int kW, - int dW) -{ - long niframe; - int noframe; - long framesize; - - real *gradInput_data; - real 
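/*
 * The function below only routes gradient to the argmax positions saved
 * in `indices` during the forward pass; every other input element keeps
 * a zero gradient.  Single-sample sketch with flat arrays (hypothetical
 * names; gradInput assumed pre-zeroed, as in the code below):
 */
static void naive_temporal_maxpool_backward(float *gradInput,
                                            const float *gradOutput,
                                            const long *indices,
                                            long noframe, long framesize,
                                            int dW)
{
  for (long t = 0; t < noframe; t++)
    for (long y = 0; y < framesize; y++) {
      long x = indices[t * framesize + y];   /* offset inside the window */
      if (x != -1)
        gradInput[(t * dW + x) * framesize + y] += gradOutput[t * framesize + y];
    }
}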
*gradOutput_data; - THIndex_t *indices_data; - - long t, y; - - THNN_(TemporalMaxPooling_shapeCheck)(state, input, gradOutput, indices, kW, dW); - /* get contiguous gradOutput */ - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* resize and zero */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - int dimS = 0; // sequence dimension - int dimF = 1; // feature dimension - - if (input->nDimension == 3) - { - dimS = 1; - dimF = 2; - } - /* sizes */ - niframe = input->size[dimS]; - noframe = gradOutput->size[dimS]; - framesize = gradOutput->size[dimF]; - - /* get raw pointers */ - gradInput_data = THTensor_(data)(gradInput); - gradOutput_data = THTensor_(data)(gradOutput); - indices_data = THIndexTensor_(data)(indices); - - if (input->nDimension == 2) - { - for(t = 0; t < noframe; t++) - { - real *gip = gradInput_data + t*framesize*dW; - real *gop = gradOutput_data + t*framesize; - THIndex_t *xp = indices_data + t*framesize; -#pragma omp parallel for private(y) - for(y = 0; y < framesize; y++) - { - /* compute local max: */ - long maxindex = (long)xp[y]; - if (maxindex != -1) - gip[maxindex*framesize+y] += gop[y]; - } - } - } - else - { - /* number of batch frames */ - long nbframe = input->size[0]; - long i; - - for(i = 0; i < nbframe; i++) - { - real *gradInputSample_data = gradInput_data + i*niframe*framesize; - real *gradOutputSample_data = gradOutput_data + i*noframe*framesize; - THIndex_t *indicesSample_data = indices_data + i*noframe*framesize; - - for(t = 0; t < noframe; t++) - { - real *gip = gradInputSample_data + t*framesize*dW; - real *gop = gradOutputSample_data + t*framesize; - THIndex_t *xp = indicesSample_data + t*framesize; -#pragma omp parallel for private(y) - for(y = 0; y < framesize; y++) - { - /* compute local max: */ - long maxindex = (long)xp[y]; - if (maxindex != -1) - gip[maxindex*framesize+y] += gop[y]; - } - } - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/TemporalRowConvolution.c b/contrib/lua-torch/nn/lib/THNN/generic/TemporalRowConvolution.c deleted file mode 100644 index e3ae41e22e..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/TemporalRowConvolution.c +++ /dev/null @@ -1,472 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/TemporalRowConvolution.c" -#else - -static inline void THNN_(TemporalRowConvolution_shapeCheck)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *weight, - THTensor *bias, - int kW, - int dW, - int padW) { - - THArgCheck(kW > 0, 5, - "kernel size should be greater than zero, but got kW: %d", kW); - THArgCheck(dW > 0, 6, - "stride should be greater than zero, but got dW: %d", dW); - THNN_ARGCHECK(weight->nDimension == 3, 3, weight, - "3D weight tensor expected, but got: %s"); - THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous"); - THArgCheck(!bias || THTensor_(isContiguous)(bias), 5, "bias must be contiguous"); - - if (bias != NULL) { - THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]); - } - - // we're always looking at (possibly batch) x feats x seq - int ndim = input->nDimension; - int dimF = 0; - int dimS = 1; - - if (ndim == 3) { - ++dimS; - ++dimF; - } - - THNN_ARGCHECK(ndim == 2 || ndim == 3, 1, input, - "2D or 3D (batch mode) input tensor expected, but got :%s"); - - long inputFrameSize = weight->size[0]; - long nInputFrame = input->size[dimS]; - long nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1; - - if (nOutputFrame < 1) { - THError("Given 
input size: (%d x %d). " - "Calculated output size: (%d x %d). Output size is too small", - inputFrameSize, nInputFrame, inputFrameSize, nOutputFrame); - } - - THNN_CHECK_DIM_SIZE(input, ndim, dimF, inputFrameSize); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimF, inputFrameSize); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimS, nOutputFrame); - } -} - -static void THNN_(unfolded_acc_row)( - THTensor *finput, - THTensor *input, - int kW, - int dW, - int padW, - long inputFrameSize, - long nInputFrame, - long nOutputFrame) { - - size_t c; - real *input_data = THTensor_(data)(input); - real *finput_data = THTensor_(data)(finput); - -// #pragma omp parallel for private(c) - for (c = 0; c < inputFrameSize; c++) { - size_t kw, x; - long long ix = 0; - - for (kw = 0; kw < kW; kw++) { - real *src = finput_data - + c * (kW * nOutputFrame) - + kw * (nOutputFrame); - real *dst = input_data + c * (nInputFrame); - - ix = (long long)(kw); - if (dW == 1) { - real *dst_slice = dst + (size_t)(ix); - THVector_(cadd)(dst_slice, dst_slice, src, 1, nOutputFrame); - } else { - for (x = 0; x < nOutputFrame; x++) { - real *dst_slice = dst + (size_t)(ix + x * dW); - THVector_(cadd)(dst_slice, dst_slice, - src + (size_t)(x), 1, 1); - } - } - } - } -} - -static void THNN_(unfolded_copy_row)( - THTensor *finput, - THTensor *input, - int kW, - int dW, - int padW, - long inputFrameSize, - long nInputFrame, - long nOutputFrame) { - - long k; - real *input_data = THTensor_(data)(input); - real *finput_data = THTensor_(data)(finput); - -// #pragma omp parallel for private(k) - for (k = 0; k < inputFrameSize * kW; k++) { - size_t c = k / kW; - size_t rest = k % kW; - size_t kw = rest % kW; - size_t x; - long long ix; - real *dst = finput_data + c * (kW * nOutputFrame) + kw * (nOutputFrame); - real *src = input_data + c * (nInputFrame); - - ix = (long long)(kw); - if (dW == 1) { - memcpy(dst, src+(size_t)(ix), sizeof(real) * (nOutputFrame)); - } else { - for (x = 0; x < nOutputFrame; x++) { - memcpy(dst + (size_t)(x), src + (size_t)(ix + x * dW), - sizeof(real) * 1); - } - } - } -} - -static void THNN_(TemporalRowConvolution_updateOutput_frame)( - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, - int kW, - int dW, - int padW, - long inputFrameSize, - long nInputFrame, - long nOutputFrame) { - - long i; - - THTensor *output3d = THTensor_(newWithStorage3d)( - output->storage, output->storageOffset, - inputFrameSize, -1, - 1, -1, - nOutputFrame, -1); - - THNN_(unfolded_copy_row)(finput, input, kW, dW, padW, - inputFrameSize, nInputFrame, nOutputFrame); - - THTensor_(zero)(output); - - if (bias != NULL) { - for (i = 0; i < inputFrameSize; i++) - THVector_(fill) - (output->storage->data + output->storageOffset - + output->stride[0] * i, - THTensor_(get1d)(bias, i), nOutputFrame); - } - - THTensor_(baddbmm)(output3d, 1, output3d, 1, weight, finput); - - THTensor_(free)(output3d); -} - -void THNN_(TemporalRowConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, - THTensor *fgradInput, // unused here but needed for Cuda - int kW, - int dW, - int padW, - bool featFirst) { - - int ndim = input->nDimension; - - THTensor *tinput; - if (!featFirst) { - tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2); - input = THTensor_(newContiguous)(tinput); - } else { - input = THTensor_(newContiguous)(input); - } - - THNN_(TemporalRowConvolution_shapeCheck)( - state, input, 
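/*
 * unfolded_copy_row above is a 1-D "im2col": for each feature row it lays
 * the kW-long sliding windows out as a kW x nOutputFrame block, so the
 * per-row convolution in updateOutput_frame becomes one small matrix
 * product (the baddbmm of weight [F x 1 x kW] with finput
 * [F x kW x nOutputFrame]).  Single-row sketch (hypothetical names; zero
 * padding assumed, matching the index arithmetic above):
 */
static void naive_unfold_row(const float *row, float *unfolded,
                             long nInputFrame, int kW, int dW)
{
  long nOutputFrame = (nInputFrame - kW) / dW + 1;
  for (int kw = 0; kw < kW; kw++)
    for (long x = 0; x < nOutputFrame; x++)
      unfolded[kw * nOutputFrame + x] = row[kw + x * dW];
}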
NULL, weight, bias, kW, dW, padW); - - long inputFrameSize = weight->size[0]; - long nInputFrame = input->size[ndim - 1]; - long nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1; - - if (ndim == 2) { /* non-batch mode */ - - THTensor_(resize3d)(finput, inputFrameSize, kW, nOutputFrame); - THTensor_(resize2d)(output, inputFrameSize, nOutputFrame); - - THTensor_(zero)(finput); - THTensor_(zero)(output); - - THNN_(TemporalRowConvolution_updateOutput_frame) - (input, output, weight, bias, finput, - kW, dW, padW, - inputFrameSize, nInputFrame, nOutputFrame); - - } else { - long T = input->size[0]; - long t; - - THTensor_(resize4d)(finput, T, inputFrameSize, kW, nOutputFrame); - THTensor_(resize3d)(output, T, inputFrameSize, nOutputFrame); - - THTensor_(zero)(finput); - THTensor_(zero)(output); - -#pragma omp parallel for private(t) - for (t = 0; t < T; t++) { - THTensor *input_t = THTensor_(newSelect)(input, 0, t); - THTensor *output_t = THTensor_(newSelect)(output, 0, t); - THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); - - THNN_(TemporalRowConvolution_updateOutput_frame) - (input_t, output_t, weight, bias, finput_t, - kW, dW, padW, inputFrameSize, nInputFrame, nOutputFrame); - - THTensor_(free)(input_t); - THTensor_(free)(output_t); - THTensor_(free)(finput_t); - } - } - - if (!featFirst) { // NOTE: output will NOT be contiguous in this case - THTensor_(transpose)(output, output, ndim - 1, ndim - 2); - THTensor_(free)(tinput); - } - - THTensor_(free)(input); -} - -static void THNN_(TemporalRowConvolution_updateGradInput_frame)( - THTensor *gradInput, - THTensor *gradOutput, - THTensor *weight, - THTensor *fgradInput, - int kW, - int dW, - int padW, - long inputFrameSize, - long nInputFrame, - long nOutputFrame) { - - THTensor *gradOutput3d = THTensor_(newWithStorage3d)( - gradOutput->storage, gradOutput->storageOffset, - inputFrameSize, -1, - 1, -1, - nOutputFrame, -1); - - // weight: inputFrameSize x kW x 1 - // gradOutput3d: inputFrameSize x 1 x nOutputFrame - THTensor_(baddbmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput3d); - // fgradInput: inputFrameSize x kW x nOutputFrame - THTensor_(free)(gradOutput3d); - - THTensor_(zero)(gradInput); - - THNN_(unfolded_acc_row)(fgradInput, gradInput, - kW, dW, padW, - inputFrameSize, nInputFrame, nOutputFrame); -} - -void THNN_(TemporalRowConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - THTensor *fgradInput, - int kW, - int dW, - int padW, - bool featFirst) { - - int ndim = input->nDimension; - - THTensor *tinput, *tgradOutput; - - if (!featFirst) { - tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2); - tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2); - - input = THTensor_(newContiguous)(tinput); - gradOutput = THTensor_(newContiguous)(tgradOutput); - - } else { - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - } - - THNN_(TemporalRowConvolution_shapeCheck)(state, input, gradOutput, weight, - NULL, kW, dW, padW); - - long inputFrameSize = weight->size[0]; - long nInputFrame = input->size[ndim - 1]; - long nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1; - - THTensor_(resizeAs)(fgradInput, finput); - THTensor_(resizeAs)(gradInput, input); - - THTensor_(zero)(fgradInput); - THTensor_(zero)(gradInput); - - THTensor *tweight = THTensor_(new)(); - THTensor_(transpose)(tweight, weight, 1, 2); - - if (ndim == 2) { - 
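/*
 * unfolded_acc_row is the adjoint of that unfold ("col2im"): one input
 * sample appears in several overlapping windows, so its gradient is the
 * sum over every (kw, x) pair that read it.  Single-row sketch
 * (hypothetical names, zero padding assumed):
 */
static void naive_fold_row_acc(float *gradRow, const float *unfolded,
                               long nInputFrame, int kW, int dW)
{
  long nOutputFrame = (nInputFrame - kW) / dW + 1;
  for (int kw = 0; kw < kW; kw++)
    for (long x = 0; x < nOutputFrame; x++)
      gradRow[kw + x * dW] += unfolded[kw * nOutputFrame + x];
}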
THNN_(TemporalRowConvolution_updateGradInput_frame) - (gradInput, gradOutput, tweight, fgradInput, - kW, dW, padW, - inputFrameSize, nInputFrame, nOutputFrame); - } else { - long T = input->size[0]; - long t; - -#pragma omp parallel for private(t) - for (t = 0; t < T; t++) { - - THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t); - THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); - THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t); - - THNN_(TemporalRowConvolution_updateGradInput_frame) - (gradInput_t, gradOutput_t, tweight, fgradInput_t, - kW, dW, padW, - inputFrameSize, nInputFrame, nOutputFrame); - - THTensor_(free)(gradInput_t); - THTensor_(free)(gradOutput_t); - THTensor_(free)(fgradInput_t); - } - } - - THTensor_(free)(tweight); - - if (!featFirst) { // NOTE: gradInput will NOT be contiguous in this case - - THTensor_(free)(tinput); - THTensor_(free)(tgradOutput); - - THTensor_(transpose)(gradInput, gradInput, ndim - 1, ndim - 2); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); - -} - -static void THNN_(TemporalRowConvolution_accGradParameters_frame)( - THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, - THTensor *finput, real scale) { - - long i; - THTensor *gradOutput3d = THTensor_(newWithStorage3d)( - gradOutput->storage, gradOutput->storageOffset, - gradOutput->size[0], -1, - 1, -1, - gradOutput->size[1], -1); - - THTensor *tfinput = THTensor_(new)(); - THTensor_(transpose)(tfinput, finput, 1, 2); - // gradOutput3d: inputFrameSize x 1 x nOutputFrame - // finput: inputFrameSize x nOutputFrame x kW - THTensor_(baddbmm)(gradWeight, 1, gradWeight, scale, gradOutput3d, tfinput); - // gradWeight: inputFrameSize x 1 x kW - THTensor_(free)(tfinput); - - if (gradBias != NULL) { - for (i = 0; i < gradBias->size[0]; i++) { - long k; - real sum = 0; - real *data = gradOutput3d->storage->data - + gradOutput3d->storageOffset - + i * gradOutput3d->stride[0]; - for (k = 0; k < gradOutput3d->size[2]; k++) { - sum += data[k]; - } - (gradBias->storage->data + gradBias->storageOffset)[i] - += scale * sum; - } - } - - THTensor_(free)(gradOutput3d); - -} - -void THNN_(TemporalRowConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, - THTensor *fgradInput, - int kW, - int dW, - int padW, - bool featFirst, - accreal scale_) { - - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - int ndim = input->nDimension; - - THTensor *tinput, *tgradOutput; - - if (!featFirst) { - tinput = THTensor_(newTranspose)(input, ndim - 1, ndim - 2); - tgradOutput = THTensor_(newTranspose)(gradOutput, ndim - 1, ndim - 2); - - input = THTensor_(newContiguous)(tinput); - gradOutput = THTensor_(newContiguous)(tgradOutput); - } else { - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - } - - THNN_(TemporalRowConvolution_shapeCheck) - (state, input, gradOutput, gradWeight, gradBias, kW, dW, padW); - - long inputFrameSize = gradWeight->size[0]; - long nInputFrame = input->size[ndim - 1]; - long nOutputFrame = (nInputFrame + 2 * padW - kW) / dW + 1; - - if (ndim == 2) { - THNN_(TemporalRowConvolution_accGradParameters_frame)( - gradOutput, gradWeight, gradBias, finput, scale); - } else { - long T = input->size[0]; - long t; - - for (t = 0; t < T; t++) { - THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); - THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); - - 
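/*
 * Per feature row f, the frame function called below computes
 *   gradWeight[f][kw] += scale * sum_x gradOutput[f][x] * window_x[kw]
 * (a 1 x nOut times nOut x kW product per row -- the baddbmm above) and
 * gradBias[f] += scale * sum_x gradOutput[f][x].  One-row sketch
 * (hypothetical names):
 */
static void naive_row_grad_params(const float *gradOut, const float *unfolded,
                                  float *gradW, float *gradB,
                                  long nOutputFrame, int kW, float scale)
{
  for (int kw = 0; kw < kW; kw++) {
    float s = 0.f;
    for (long x = 0; x < nOutputFrame; x++)
      s += gradOut[x] * unfolded[kw * nOutputFrame + x];
    gradW[kw] += scale * s;
  }
  float b = 0.f;
  for (long x = 0; x < nOutputFrame; x++)
    b += gradOut[x];
  *gradB += scale * b;
}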
THNN_(TemporalRowConvolution_accGradParameters_frame)( - gradOutput_t, gradWeight, gradBias, finput_t, scale); - - THTensor_(free)(gradOutput_t); - THTensor_(free)(finput_t); - } - } - - if (!featFirst) { - THTensor_(free)(tinput); - THTensor_(free)(tgradOutput); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/TemporalSubSampling.c b/contrib/lua-torch/nn/lib/THNN/generic/TemporalSubSampling.c deleted file mode 100644 index 68f35e28a7..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/TemporalSubSampling.c +++ /dev/null @@ -1,156 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/TemporalSubSampling.c" -#else - -static inline void THNN_(TemporalSubSampling_shapeCheck)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - int kW, - int dW, - int *inputFrameSize) { - int nInputFrame, nOutputFrame; - - THArgCheck(kW > 0, 6, - "kernel size should be greater than zero, but got kW: %d", kW); - THArgCheck(dW > 0, 7, - "stride should be greater than zero, but got dW: %d", dW); - - THNN_ARGCHECK(input->nDimension == 2, 2, input, - "2D or 3D (batch mode) tensor expected for input, but got: %s"); - if (inputFrameSize != NULL) { - THArgCheck( input->size[1] == *inputFrameSize, 2, - "invalid input frame size. Got: %d, Expected: %d", - input->size[1], *inputFrameSize); - } - THArgCheck( input->size[0] >= kW, 2, - "input sequence smaller than kernel size. Got %d, Expected: %d", - input->size[0], kW); - - nInputFrame = input->size[0]; - nOutputFrame = (nInputFrame - kW) / dW + 1; - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, input->nDimension, 0, nOutputFrame); - if (inputFrameSize != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, input->nDimension, 1, *inputFrameSize); - } - } -} - -void THNN_(TemporalSubSampling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - int kW, - int dW, - int inputFrameSize) -{ - THTensor *outputFrame, *inputWindow; - int nInputFrame, nOutputFrame; - long k; - - THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous"); - THArgCheck(!bias || THTensor_(isContiguous)(bias), 4, "bias must be contiguous"); - THNN_(TemporalSubSampling_shapeCheck)(state, input, NULL, kW, dW, &inputFrameSize); - - outputFrame = THTensor_(new)(); - inputWindow = THTensor_(new)(); - - nInputFrame = input->size[0]; - nOutputFrame = (nInputFrame - kW) / dW + 1; - - THTensor_(resize2d)(output, - nOutputFrame, - inputFrameSize); - - for(k = 0; k < nOutputFrame; k++) - { - THTensor_(narrow)(inputWindow, input, 0, k*dW, kW); - THTensor_(select)(outputFrame, output, 0, k); - THTensor_(sum)(outputFrame, inputWindow, 0, 1); - THTensor_(cmul)(outputFrame, outputFrame, weight); - THTensor_(cadd)(outputFrame, outputFrame, 1, bias); - } - - THTensor_(free)(outputFrame); - THTensor_(free)(inputWindow); -} - -void THNN_(TemporalSubSampling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - int kW, - int dW) -{ - - THTensor *gradOutputFrame; - THTensor *gradInputWindow, *buffer, *kwunit; - long k; - - THArgCheck(THTensor_(isContiguous)(weight), 4, "weight must be contiguous"); - THNN_(TemporalSubSampling_shapeCheck)(state, input, gradOutput, kW, dW, NULL); - - gradOutputFrame = THTensor_(new)(); - gradInputWindow = THTensor_(new)(); - buffer = THTensor_(new)(); - kwunit = THTensor_(newWithSize1d)(kW); - - THTensor_(fill)(kwunit, 1); - 
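/*
 * TemporalSubSampling computes out[k] = weight (elementwise) * (sum of
 * the kW input frames in window k) + bias, so its input gradient adds
 * weight * gradOut[k] to every frame of window k.  The code expresses
 * that as an outer product with the all-ones vector kwunit (the addr
 * call below); an equivalent plain-loop sketch (hypothetical names,
 * gradIn pre-zeroed):
 */
static void naive_subsample_backward(float *gradIn, const float *gradOut,
                                     const float *weight, long nOutputFrame,
                                     long frameSize, int kW, int dW)
{
  for (long k = 0; k < nOutputFrame; k++)
    for (int x = 0; x < kW; x++)
      for (long j = 0; j < frameSize; j++)
        gradIn[(k * dW + x) * frameSize + j] += weight[j] * gradOut[k * frameSize + j];
}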
THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - for(k = 0; k < gradOutput->size[0]; k++) - { - THTensor_(narrow)(gradInputWindow, gradInput, 0, k*dW, kW); - THTensor_(select)(gradOutputFrame, gradOutput, 0, k); - THTensor_(cmul)(buffer, weight, gradOutputFrame); - THTensor_(addr)(gradInputWindow, 1, gradInputWindow, 1, kwunit, buffer); - } - - THTensor_(free)(gradOutputFrame); - THTensor_(free)(gradInputWindow); - THTensor_(free)(buffer); - THTensor_(free)(kwunit); -} - -void THNN_(TemporalSubSampling_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - int kW, - int dW, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - THTensor *gradOutputFrame; - THTensor *inputWindow, *buffer; - long k; - - THNN_(TemporalSubSampling_shapeCheck)(state, input, gradOutput, kW, dW, NULL); - gradOutputFrame = THTensor_(new)(); - inputWindow = THTensor_(new)(); - buffer = THTensor_(new)(); - - for(k = 0; k < gradOutput->size[0]; k++) - { - THTensor_(narrow)(inputWindow, input, 0, k*dW, kW); - THTensor_(select)(gradOutputFrame, gradOutput, 0, k); - THTensor_(sum)(buffer, inputWindow, 0, 1); - THTensor_(addcmul)(gradWeight, gradWeight, scale, buffer, gradOutputFrame); - THTensor_(cadd)(gradBias, gradBias, scale, gradOutputFrame); - } - - THTensor_(free)(gradOutputFrame); - THTensor_(free)(inputWindow); - THTensor_(free)(buffer); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/Threshold.c b/contrib/lua-torch/nn/lib/THNN/generic/Threshold.c deleted file mode 100644 index 949c7a07cb..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/Threshold.c +++ /dev/null @@ -1,64 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/Threshold.c" -#else - -void THNN_(Threshold_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - accreal threshold_, - accreal val_, - bool inplace) -{ - real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_); - real val = TH_CONVERT_ACCREAL_TO_REAL(val_); - if (inplace) - { - TH_TENSOR_APPLY(real, input, - if (*input_data <= threshold) - *input_data = val; - ); - THTensor_(set)(output, input); - } - else - { - THTensor_(resizeAs)(output, input); - TH_TENSOR_APPLY2(real, output, real, input, - *output_data = (*input_data > threshold) ? 
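/*
 * This ternary is the whole operator: Threshold is the generalized ReLU,
 * y = (x > threshold) ? x : val, so ReLU is threshold = 0, val = 0.  The
 * backward pass below passes gradient through exactly where
 * x > threshold and zeroes it elsewhere.  Flat-array sketch
 * (hypothetical names):
 */
static void naive_threshold(const float *x, float *y, long n,
                            float threshold, float val)
{
  for (long i = 0; i < n; i++)
    y[i] = (x[i] > threshold) ? x[i] : val;
}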
*input_data : val; - ); - } -} - -void THNN_(Threshold_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - accreal threshold_, - accreal val_, - bool inplace) -{ - real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_); - real val = TH_CONVERT_ACCREAL_TO_REAL(val_); - THNN_CHECK_NELEMENT(input, gradOutput); - if (inplace) - { - TH_TENSOR_APPLY2(real, gradOutput, real, input, - if ((*input_data) <= threshold) - *gradOutput_data = 0; - ); - THTensor_(set)(gradInput, gradOutput); - } - else - { - THTensor_(resizeAs)(gradInput, input); - TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input, - if ((*input_data) > threshold) - *gradInput_data = *gradOutput_data; - else - *gradInput_data = 0; - ); - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricAveragePooling.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricAveragePooling.c deleted file mode 100644 index 91c870e6f9..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricAveragePooling.c +++ /dev/null @@ -1,373 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricAveragePooling.c" -#else - -static inline void THNN_(VolumetricAveragePooling_shapeCheck)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH) { - long nslices; - long itime; - long iheight; - long iwidth; - long otime; - long oheight; - long owidth; - int ndim = input->nDimension; - int dimN = 0; - int dimt = 1; - int dimh = 2; - int dimw = 3; - - if (input->nDimension == 5) - { - dimN++; - dimt++; - dimh++; - dimw++; - } - - THArgCheck(kT > 0 && kW > 0 && kH > 0, 5, - "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", - kT, kH, kW); - THArgCheck(dT > 0 && dW > 0 && dH > 0, 8, - "stride should be greater than zero, but got dT: %d dH: %d dW: %d", - dT, dH, dW); - THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input, - "4D or 5D (batch mode) tensor expected for input, but got: %s"); - - THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH - && input->size[dimt] >= kT, 2, - "input image (T: %d H: %d W: %d) smaller than " - "kernel size (kT: %d kH: %d kW: %d)", - input->size[dimt], input->size[dimh], input->size[dimw], - kT, kH, kW); - - /* sizes */ - nslices = input->size[dimN]; - itime = input->size[dimt]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - otime = (itime - kT) / dT + 1; - oheight = (iheight - kH) / dH + 1; - owidth = (iwidth - kW) / dW + 1; - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth); - } -} - -static void THNN_(VolumetricAveragePooling_updateOutput_frame)( - real *input_p, - real *output_p, - long nslices, - long itime, - long iwidth, - long iheight, - long otime, - long owidth, - long oheight, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH) -{ - long k; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - /* loop over output */ - long i, j, ti; - for (ti = 0; ti < otime; ti++) - { - for (i = 0; i < oheight; i++) - { - for (j = 0; j < owidth; j++) - { - /* local pointers */ - real *ip = input_p + k * itime * iwidth * iheight - + ti * iwidth * iheight * dT + i * iwidth * dH + j * dW; - real *op = output_p + k * otime * owidth * oheight - + ti * owidth * oheight + i * owidth + 
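/*
 * The pointer arithmetic here is flat (slice, time, row, col) indexing
 * over a contiguous block: element (k, t, i, j) of a T x H x W volume in
 * slice k lives at ((k*T + t)*H + i)*W + j; for the input pointer, t, i
 * and j are additionally scaled by the strides dT, dH, dW.  Hypothetical
 * index helper:
 */
static long flat_idx_cthw(long k, long t, long i, long j,
                          long T, long H, long W)
{
  return ((k * T + t) * H + i) * W + j;
}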
j; - - /* compute local sum: */ - real sum = 0.0; - int x, y, z; - - for (z=0; z < kT; z++) - { - for (y = 0; y < kH; y++) - { - for (x = 0; x < kW; x++) - { - sum += *(ip + z * iwidth * iheight + y * iwidth + x); - } - } - } - - /* set output to local max */ - *op = sum / (kT * kW * kH); - } - } - } - } -} - -void THNN_(VolumetricAveragePooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH) -{ - long nslices; - long itime; - long iheight; - long iwidth; - long otime; - long oheight; - long owidth; - real *input_data; - real *output_data; - - THNN_(VolumetricAveragePooling_shapeCheck)( - state, input, NULL, kT, kW, kH, - dT, dW, dH); - - int dimN = 0; - int dimt = 1; - int dimh = 2; - int dimw = 3; - - if (input->nDimension == 5) - { - dimN++; - dimt++; - dimh++; - dimw++; - } - - /* sizes */ - nslices = input->size[dimN]; - itime = input->size[dimt]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - otime = (itime - kT) / dT + 1; - oheight = (iheight - kH) / dH + 1; - owidth = (iwidth - kW) / dW + 1; - - /* get contiguous input */ - input = THTensor_(newContiguous)(input); - - if (input->nDimension == 4) /* non-batch mode */ - { - /* resize output */ - THTensor_(resize4d)(output, nslices, otime, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - - THNN_(VolumetricAveragePooling_updateOutput_frame)( - input_data, output_data, nslices, - itime, iwidth, iheight, - otime, owidth, oheight, - kT, kW, kH, - dT, dW, dH - ); - } - else /* batch mode */ - { - long p; - long nBatch = input->size[0]; - - long istride = nslices * itime * iwidth * iheight; - long ostride = nslices * otime * owidth * oheight; - - /* resize output */ - THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - -#pragma omp parallel for private(p) - for (p=0; p < nBatch; p++) - { - THNN_(VolumetricAveragePooling_updateOutput_frame)( - input_data + p * istride, output_data + p * ostride, nslices, - itime, iwidth, iheight, - otime, owidth, oheight, - kT, kW, kH, - dT, dW, dH - ); - } - } - - /* cleanup */ - THTensor_(free)(input); -} - -static void THNN_(VolumetricAveragePooling_updateGradInput_frame)( - real *gradInput_p, - real *gradOutput_p, - long nslices, - long itime, - long iwidth, - long iheight, - long otime, - long owidth, - long oheight, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH) -{ - long k; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - /* loop over output */ - long i, j, ti; - for (ti = 0; ti < otime; ti++) - { - for (i = 0; i < oheight; i++) - { - for (j = 0; j < owidth; j++) - { - /* local pointers */ - real *ip = gradInput_p + k * itime * iwidth * iheight - + ti * iwidth * iheight * dT + i * iwidth * dH + j * dW; - real *op = gradOutput_p + k * otime * owidth * oheight - + ti * owidth * oheight + i * owidth + j; - - /* scatter gradients out to footprint: */ - real val = *op / (kT * kW * kH); - int x,y,z; - for (z=0; z < kT; z++) - { - for (y = 0; y < kH; y++) - { - for (x = 0; x < kW; x++) - { - *(ip + z * iwidth * iheight + y * iwidth + x) += val; - } - } - } - } - } - } - } -} - -void THNN_(VolumetricAveragePooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH) -{ - int nslices; - int itime; - int 
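/*
 * The frame kernel above is the whole backward pass for average pooling:
 * each output gradient, divided by the window volume kT*kH*kW, is
 * scattered uniformly over its input footprint.  One-window sketch
 * (hypothetical names; gradWin points at the window start):
 */
static void naive_avgpool3d_scatter(float *gradWin, float g,
                                    int kT, int kH, int kW,
                                    long iH, long iW)
{
  float val = g / (float)(kT * kH * kW);   /* equal share per input cell */
  for (int z = 0; z < kT; z++)
    for (int y = 0; y < kH; y++)
      for (int x = 0; x < kW; x++)
        gradWin[(z * iH + y) * iW + x] += val;
}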
iheight; - int iwidth; - int otime; - int oheight; - int owidth; - real *gradInput_data; - real *gradOutput_data; - - int dimN = 0; - int dimt = 1; - int dimh = 2; - int dimw = 3; - - THNN_(VolumetricAveragePooling_shapeCheck)( - state, input, gradOutput, kT, kW, kH, - dT, dW, dH); - - /* get contiguous gradOutput */ - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - if (input->nDimension == 5) - { - dimN++; - dimt++; - dimh++; - dimw++; - } - - /* sizes */ - nslices = input->size[dimN]; - itime = input->size[dimt]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - otime = gradOutput->size[dimt]; - oheight = gradOutput->size[dimh]; - owidth = gradOutput->size[dimw]; - - /* get raw pointers */ - gradInput_data = THTensor_(data)(gradInput); - gradOutput_data = THTensor_(data)(gradOutput); - - /* backprop */ - if (input->nDimension == 4) /* non-batch mode*/ - { - THNN_(VolumetricAveragePooling_updateGradInput_frame)( - gradInput_data, gradOutput_data, nslices, - itime, iwidth, iheight, - otime, owidth, oheight, - kT, kW, kH, - dT, dW, dH - ); - } - else /* batch mode */ - { - long p; - long nBatch = input->size[0]; - - long istride = nslices * itime * iwidth * iheight; - long ostride = nslices * otime * owidth * oheight; - -#pragma omp parallel for private(p) - for (p = 0; p < nBatch; p++) - { - THNN_(VolumetricAveragePooling_updateGradInput_frame)( - gradInput_data + p * istride, gradOutput_data + p * ostride, nslices, - itime, iwidth, iheight, - otime, owidth, oheight, - kT, kW, kH, - dT, dW, dH - ); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricConvolution.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricConvolution.c deleted file mode 100644 index be1aa82e63..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricConvolution.c +++ /dev/null @@ -1,260 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricConvolution.c" -#else - -void THNN_(VolumetricConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, // only used by cuda impl - THTensor *fgradInput, // only used by cuda impl - int dT, - int dW, - int dH, - int pT, - int pW, - int pH) -{ - THArgCheck(pT != 0 || pW != 0 || pH != 0, 9, "padding not supported by CPU backend"); // sharing signature with CUDA version - - THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input, - "4D or 5D (batch mode) tensor expected for input, but got: %s"); - - int dimt = 1; - int dimh = 2; - int dimw = 3; - - if (input->nDimension == 5) - { - dimt++; - dimh++; - dimw++; - } - - long nOutputPlane = weight->size[0]; - long kT = weight->size[2]; - long kH = weight->size[3]; - long kW = weight->size[4]; - long inputDepth = input->size[dimt]; - long inputHeight = input->size[dimh]; - long inputWidth = input->size[dimw]; - long outputDepth = (inputDepth - kT) / dT + 1; - long outputWidth = (inputWidth - kW) / dW + 1; - long outputHeight = (inputHeight - kH) / dH + 1; - THTensor *outn = THTensor_(new)(); - long i, j; - if (input->nDimension == 4) /* non-batch mode */ - { - THTensor_(resize4d)(output, nOutputPlane, outputDepth, outputHeight, outputWidth); - - /* add bias */ - if (bias) { - for (i = 0; i < bias->size[0]; i++) - { - THTensor_(select)(outn, output, 0, i); - THTensor_(fill)(outn, THTensor_(get1d)(bias, i)); - } - } else { - 
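/*
 * conv3Dmv below accumulates into output (alpha = beta = 1.0), so output
 * is seeded first: each output plane is filled with its bias value, or
 * zeroed when there is no bias.  Flat sketch of the seeding step
 * (hypothetical names):
 *
 * Side note: the THArgCheck on padding at the top of this function
 * appears inverted -- it fails precisely when pT, pW and pH are all
 * zero, although its message says padding is unsupported on the CPU
 * backend.
 */
static void seed_output_with_bias(float *out, const float *bias,
                                  long nPlanes, long planeSize)
{
  for (long p = 0; p < nPlanes; p++)
    for (long i = 0; i < planeSize; i++)
      out[p * planeSize + i] = bias ? bias[p] : 0.f;
}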
THTensor_(zero)(output); - } - - /* do convolutions */ - THTensor_(conv3Dmv)(output, 1.0, 1.0, input, weight, dT, dH, dW, "V", "X"); - } - else /* batch mode */ - { - long nBatch = input->size[0]; - THTensor_(resize5d)(output, nBatch, nOutputPlane, outputDepth, outputHeight, outputWidth); - THTensor *inb = THTensor_(new)(); - THTensor *outb = THTensor_(new)(); - - /* loop over batches */ - for (j = 0; j < nBatch; j++) - { - THTensor_(select)(inb, input, 0, j); - THTensor_(select)(outb, output, 0, j); - - /* add bias */ - if (bias) { - for (i = 0; i < bias->size[0]; i++) - { - THTensor_(select)(outn, outb, 0, i); - THTensor_(fill)(outn, THTensor_(get1d)(bias, i)); - } - } else { - THTensor_(zero)(outb); - } - - /* do convolutions */ - THTensor_(conv3Dmv)(outb, 1.0, 1.0, inb, weight, dT, dH, dW, "V", "X"); - } - - THTensor_(free)(inb); - THTensor_(free)(outb); - } - THTensor_(free)(outn); -} - -void THNN_(VolumetricConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, // only used by cuda impl - int dT, - int dW, - int dH, - int pT, - int pW, - int pH) -{ - THArgCheck(pT != 0 || pW != 0 || pH != 0, 9, "padding not supported by CPU backend"); // sharing signature with CUDA version - - THNN_ARGCHECK(weight->nDimension == 5, 4, weight, - "5D (nOutputPlane x nInputPlane x kT x kH x kW) tensor " - "expected for weight, but got: %s"); - - int nOutputPlane = (int)weight->size[0]; - - THNN_ARGCHECK(gradOutput->nDimension == 4 || gradOutput->nDimension == 5, 3, - gradOutput, - "4D or 5D (batch mode) tensor expected for gradOutput, but got: %s"); - - int dimPlane = 0; - if (gradOutput->nDimension == 5) - { - dimPlane++; - } - - THArgCheck(nOutputPlane == gradOutput->size[dimPlane], 1, - "Number of output features is not equal to nOutputPlane" - ); - - /* gradient to input */ - THTensor *tweight = THTensor_(newTranspose)(weight, 0, 1); - if (gradOutput->nDimension == 4) /* non-batch mode */ - { - THTensor_(conv3Dmv)(gradInput, 0.0, 1.0, gradOutput, tweight, dT, dH, dW, "F", "C"); - } - else /* batch mode */ - { - long nBatch = gradOutput->size[0]; - THTensor *ginpb = THTensor_(new)(); - THTensor *goutb = THTensor_(new)(); - long j; - - THTensor_(resize5d)(gradInput, - input->size[0], input->size[1], input->size[2], input->size[3], input->size[4] - ); - - /* loop over batches */ - for (j = 0; j < nBatch; j++) - { - THTensor_(select)(ginpb, gradInput, 0, j); - THTensor_(select)(goutb, gradOutput, 0, j); - THTensor_(conv3Dmv)(ginpb, 0.0, 1.0, goutb, tweight, dT, dH, dW, "F", "C"); - } - THTensor_(free)(ginpb); - THTensor_(free)(goutb); - } - - THTensor_(free)(tweight); -} - -void THNN_(VolumetricConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, // only used by cuda impl - THTensor *fgradInput, // only used by cuda impl - int dT, - int dW, - int dH, - int pT, - int pW, - int pH, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - THArgCheck(pT != 0 || pW != 0 || pH != 0, 9, "padding not supported by CPU backend"); // sharing signature with CUDA version - - THNN_ARGCHECK(gradWeight->nDimension == 5, 4, gradWeight, - "5D (nOutputPlane x nInputPlane x kT x kH x kW) tensor " - "expected for gradWeight, but got: %s"); - - int nOutputPlane = (int)gradWeight->size[0]; - if (gradBias) { - THArgCheck(gradBias->nDimension == 1 && gradBias->size[0] == nOutputPlane, 5, - "gradBias tensor 
has wrong size" - ); - } - - long k; - real *gradBias_data; - THTensor *gradOutSlice; - int dimPlane = 0; - if (gradOutput->nDimension == 5) - { - dimPlane++; - } - - THArgCheck(nOutputPlane == gradOutput->size[dimPlane], 1, - "Number of output features is not equal to nOutputPlane" - ); - - if (gradOutput->nDimension == 4) /* non-batch mode */ - { - /* gradient to bias */ - if (gradBias) { - gradBias_data = THTensor_(data)(gradBias); - gradOutSlice = THTensor_(new)(); - for (k = 0; k < nOutputPlane; k++) - { - THTensor_(select)(gradOutSlice, gradOutput, 0, k); - gradBias_data[k] += scale * THTensor_(sumall)(gradOutSlice); - } - THTensor_(free)(gradOutSlice); - } - - /* gradient to kernels */ - THTensor_(conv3DRevger)(gradWeight, 1.0, scale, input, gradOutput, dT, dH, dW); - } - else /* batch mode */ - { - long nBatch = gradOutput->size[0]; - THTensor *inpb = THTensor_(new)(); - THTensor *goutb = THTensor_(new)(); - long j; - - /* loop over batches */ - for (j = 0; j < nBatch; j++) - { - THTensor_(select)(inpb, input, 0, j); - THTensor_(select)(goutb, gradOutput, 0, j); - - /* gradient to bias */ - if (gradBias) { - gradBias_data = THTensor_(data)(gradBias); - gradOutSlice = THTensor_(new)(); - for (k = 0; k < nOutputPlane; k++) - { - THTensor_(select)(gradOutSlice, goutb, 0, k); - gradBias_data[k] += scale * THTensor_(sumall)(gradOutSlice); - } - THTensor_(free)(gradOutSlice); - } - - /* gradient to kernels */ - THTensor_(conv3DRevger)(gradWeight, 1.0, scale, inpb, goutb, dT, dH, dW); - } - THTensor_(free)(inpb); - THTensor_(free)(goutb); - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricConvolutionMM.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricConvolutionMM.c deleted file mode 100644 index 00a121db6b..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricConvolutionMM.c +++ /dev/null @@ -1,628 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricConvolutionMM.c" -#else - -static void inline THNN_(VolumetricConvolutionMM_shapeCheck)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *weight, - THTensor *bias, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH) { - THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input, - "4D or 5D (batch mode) tensor expected for input, but got: %s"); - THArgCheck(kT > 0 && kW > 0 && kH > 0, 8, - "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", kT, kH, kW); - THArgCheck(dT > 0 && dW > 0 && dH > 0, 11, - "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW); - - int ndim = input->nDimension; - int dimf = 0; - int dimt = 1; - int dimh = 2; - int dimw = 3; - - if (ndim == 5) - { - dimf++; - dimt++; - dimh++; - dimw++; - } - - long nInputPlane; - long inputDepth; - long inputHeight; - long inputWidth; - long nOutputPlane; - long outputDepth; - long outputHeight; - long outputWidth; - - nInputPlane = input->size[dimf]; - inputDepth = input->size[dimt]; - inputHeight = input->size[dimh]; - inputWidth = input->size[dimw]; - nOutputPlane = weight->size[0]; - outputDepth = (inputDepth + 2*pT - kT) / dT + 1; - outputHeight = (inputHeight + 2*pH - kH) / dH + 1; - outputWidth = (inputWidth + 2*pW - kW) / dW + 1; - - if (outputWidth < 1 || outputHeight < 1 || outputDepth < 1) - { - THError( - "Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). 
Output size is too small", - nInputPlane, inputDepth, inputHeight, inputWidth, - nOutputPlane, outputDepth, outputHeight, outputWidth - ); - } - - THArgCheck(weight->nDimension == 2 || weight->nDimension == 5, 4, - "weight tensor should be 2D or 5D - got %d", weight->nDimension); - - if (bias != NULL) { - THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]); - } - - THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, outputDepth); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth); - } -} - -static int THNN_(view_weight)(THTensor **_weight) -{ - THTensor *weight = *_weight; - if (weight->nDimension == 5) { - long s1 = weight->size[0]; - long s2 = weight->size[1] * weight->size[2] * weight->size[3] * weight->size[4]; - *_weight = THTensor_(newWithStorage2d)(weight->storage, weight->storageOffset, s1, -1, s2, -1); - return 1; - } - return 0; -} - -/* note: due to write issues, this one cannot be parallelized as well as unfolded_copy */ -static void THNN_(unfolded_acc_vol)( - THTensor *finput, - THTensor *input, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH, - int nInputPlane, - int inputDepth, - int inputWidth, - int inputHeight, - int outputDepth, - int outputWidth, - int outputHeight) -{ - int nip; - real *input_data = THTensor_(data)(input); - real *finput_data = THTensor_(data)(finput); - -//#pragma omp parallel for private(nip) - for (nip = 0; nip < nInputPlane; nip++) - { - int kt, kw, kh, t, y, x, it, ix, iy; - for (kt = 0; kt < kT; kt++) - { - for (kh = 0; kh < kH; kh++) - { - for (kw = 0; kw < kW; kw++) - { - real *src = finput_data - + nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth) - + kt * (kH*kW*outputDepth*outputHeight*outputWidth) - + kh * (kW*outputDepth*outputHeight*outputWidth) - + kw * (outputDepth*outputHeight*outputWidth); - - real *dst = input_data + nip*(inputDepth*inputHeight*inputWidth); - if (pT > 0 || pH > 0 || pW > 0) - { - for (t = 0; t < outputDepth; t++) - { - it = t*dT - pT + kt; - for (y = 0; y < outputHeight; y++) - { - iy = y*dH - pH + kh; - for (x = 0; x < outputWidth; x++) - { - ix = x*dW - pW + kw; - if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth) - { - } - else - { - real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix; - THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1); - } - } - } - } - } - else - { - for (t = 0; t < outputDepth; t++) - { - it = t*dT + kt; - for (y = 0; y < outputHeight; y++) - { - iy = y*dH + kh; - for(x = 0; x < outputWidth; x++) - { - ix = x*dW + kw; - real *dst_slice = dst+it*inputHeight*inputWidth+iy*inputWidth+ix; - THVector_(cadd)(dst_slice, dst_slice, src+t*outputHeight*outputWidth+y*outputWidth+x, 1, 1); - } - } - } - } - } - } - } - } -} - -static void THNN_(unfolded_copy_vol)( - THTensor *finput, - THTensor *input, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH, - int nInputPlane, - int inputDepth, - int inputWidth, - int inputHeight, - int outputDepth, - int outputWidth, - int outputHeight) -{ - long k; - real *input_data = THTensor_(data)(input); - real *finput_data = THTensor_(data)(finput); -// #pragma omp parallel for private(k) - for (k = 0; k < nInputPlane*kT*kH*kW; k++) - { - int nip = k / (kT*kH*kW); - int rest = k % (kT*kH*kW); 
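/*
 * The divisions below decode the flattened loop index k into
 * (plane, kt, kh, kw), i.e. k = ((nip*kT + kt)*kH + kh)*kW + kw, so a
 * single loop covers every plane x kernel-offset pair (handy for the
 * commented-out OpenMP pragma).  Round-trip sketch (hypothetical
 * helper):
 */
static void decode_unfold_index(long k, int kT, int kH, int kW,
                                int *nip, int *kt, int *kh, int *kw)
{
  *nip = (int)(k / ((long)kT * kH * kW));
  long rest = k % ((long)kT * kH * kW);
  *kt = (int)(rest / (kH * kW));
  rest = rest % (kH * kW);
  *kh = (int)(rest / kW);
  *kw = (int)(rest % kW);
}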
- int kt = rest / (kH*kW); - rest = rest % (kH*kW); - int kh = rest / kW; - int kw = rest % kW; - int t,x,y,it,ix,iy; - real *dst = finput_data - + nip * (kT*kH*kW*outputDepth*outputHeight*outputWidth) - + kt * (kH*kW*outputDepth*outputHeight*outputWidth) - + kh * (kW*outputDepth*outputHeight*outputWidth) - + kw * (outputDepth*outputHeight*outputWidth); - real *src = input_data + nip*(inputDepth*inputHeight*inputWidth); - - if (pT > 0 || pH > 0 || pW > 0) - { - for (t = 0; t < outputDepth; t++) - { - it = t*dT - pT + kt; - for (y = 0; y < outputHeight; y++) - { - iy = y*dH - pH + kh; - for (x = 0; x < outputWidth; x++) - { - ix = x*dW - pW + kw; - if (it < 0 || it >= inputDepth || iy < 0 || iy >= inputHeight || ix < 0 || ix >= inputWidth) - memset(dst+t*outputHeight*outputWidth+y*outputWidth+x, 0, sizeof(real)*(1)); - else - memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1)); - } - } - } - } - else - { - for (t = 0; t < outputDepth; t++) - { - it = t*dT + kt; - for (y = 0; y < outputHeight; y++) - { - iy = y*dH + kh; - for(x = 0; x < outputWidth; x++) - { - ix = x*dW + kw; - memcpy(dst+t*outputHeight*outputWidth+y*outputWidth+x, src+it*inputHeight*inputWidth+iy*inputWidth+ix, sizeof(real)*(1)); - } - } - } - } - } -} - -static void THNN_(VolumetricConvolutionMM_updateOutput_frame)( - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH, - long nInputPlane, - long inputDepth, - long inputWidth, - long inputHeight, - long nOutputPlane, - long outputDepth, - long outputWidth, - long outputHeight) -{ - long i; - THTensor *output2d; - - THNN_(unfolded_copy_vol)( - finput, input, - kT, kW, kH, - dT, dW, dH, - pT, pW, pH, - nInputPlane, - inputDepth, inputWidth, inputHeight, - outputDepth, outputWidth, outputHeight - ); - - output2d = THTensor_(newWithStorage2d)( - output->storage, output->storageOffset, nOutputPlane, -1, - outputDepth*outputHeight*outputWidth, -1 - ); - - if (bias) { - for (i = 0; i < nOutputPlane; i++) - { - THVector_(fill)( - output->storage->data+output->storageOffset+output->stride[0]*i, - THTensor_(get1d)(bias, i), - outputDepth*outputHeight*outputWidth - ); - } - } else { - THTensor_(zero)(output); - } - - THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput); - - THTensor_(free)(output2d); -} - -void THNN_(VolumetricConvolutionMM_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *finput, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH) -{ - int dimf = 0; - int dimt = 1; - int dimh = 2; - int dimw = 3; - int freeWeight = 0; - - long nInputPlane; - long inputDepth; - long inputHeight; - long inputWidth; - long nOutputPlane; - long outputDepth; - long outputHeight; - long outputWidth; - - THNN_(VolumetricConvolutionMM_shapeCheck)( - state, input, NULL, weight, bias, - kT, kW, kH, dT, dW, dH, pT, pW, pH); - input = THTensor_(newContiguous)(input); - - if (input->nDimension == 5) - { - dimf++; - dimt++; - dimh++; - dimw++; - } - - nInputPlane = input->size[dimf]; - inputDepth = input->size[dimt]; - inputHeight = input->size[dimh]; - inputWidth = input->size[dimw]; - nOutputPlane = weight->size[0]; - outputDepth = (inputDepth + 2*pT - kT) / dT + 1; - outputHeight = (inputHeight + 2*pH - kH) / dH + 1; - outputWidth = (inputWidth + 2*pW - kW) / dW + 1; - - 
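/*
 * Each spatial extent follows o = (i + 2p - k)/d + 1; e.g. depth 16 with
 * kT = 3, pT = 1, dT = 1 gives (16 + 2 - 3)/1 + 1 = 16 ("same" size).
 * After view_weight flattens weight to 2-D, the whole convolution is one
 * GEMM: [nOutputPlane x K] times [K x oT*oH*oW] with
 * K = nInputPlane*kT*kH*kW.  Hypothetical helper:
 */
static long convmm_out_dim(long in, long k, long d, long p)
{
  return (in + 2 * p - k) / d + 1;
}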
freeWeight = THNN_(view_weight)(&weight); - - if (input->nDimension == 4) - { - THTensor_(resize2d)(finput, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth); - THTensor_(resize4d)(output, nOutputPlane, outputDepth, outputHeight, outputWidth); - - THNN_(VolumetricConvolutionMM_updateOutput_frame)( - input, output, weight, bias, finput, - kT, kW, kH, - dT, dW, dH, - pT, pW, pH, - nInputPlane, inputDepth, inputWidth, inputHeight, - nOutputPlane, outputDepth, outputWidth, outputHeight - ); - } - else - { - long T = input->size[0]; - long t; - - THTensor_(resize3d)(finput, T, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth); - THTensor_(resize5d)(output, T, nOutputPlane, outputDepth, outputHeight, outputWidth); - -// #pragma omp parallel for private(t) - for (t = 0; t < T; t++) - { - THTensor *input_t = THTensor_(newSelect)(input, 0, t); - THTensor *output_t = THTensor_(newSelect)(output, 0, t); - THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); - - THNN_(VolumetricConvolutionMM_updateOutput_frame)( - input_t, output_t, weight, bias, finput_t, - kT, kW, kH, - dT, dW, dH, - pT, pW, pH, - nInputPlane, inputDepth, inputWidth, inputHeight, - nOutputPlane, outputDepth, outputWidth, outputHeight - ); - - THTensor_(free)(input_t); - THTensor_(free)(output_t); - THTensor_(free)(finput_t); - } - } - - THTensor_(free)(input); - if (freeWeight) - THTensor_(free)(weight); -} - -static void THNN_(VolumetricConvolutionMM_updateGradInput_frame)( - THTensor *gradInput, - THTensor *gradOutput, - THTensor *weight, - THTensor *fgradInput, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH) -{ - THTensor *gradOutput2d = THTensor_(newWithStorage2d)( - gradOutput->storage, gradOutput->storageOffset, - gradOutput->size[0], -1, - gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1 - ); - - THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d); - THTensor_(free)(gradOutput2d); - - THTensor_(zero)(gradInput); - - THNN_(unfolded_acc_vol)( - fgradInput, gradInput, - kT, kW, kH, - dT, dW, dH, - pT, pW, pH, - gradInput->size[0], gradInput->size[1], gradInput->size[3], gradInput->size[2], - gradOutput->size[1], gradOutput->size[3], gradOutput->size[2] - ); -} - -void THNN_(VolumetricConvolutionMM_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - THTensor *fgradInput, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH) -{ - int nOutputPlane = (int)weight->size[0]; - - THNN_(VolumetricConvolutionMM_shapeCheck)( - state, input, gradOutput, weight, NULL, - kT, kW, kH, dT, dW, dH, pT, pW, pH); - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - - int freeWeight = THNN_(view_weight)(&weight); - - THTensor_(resizeAs)(gradInput, input); - THTensor_(resizeAs)(fgradInput, finput); - // depending on the BLAS library, fgradInput (result tensor) might - // be left uninitialized on zero alpha, which might lead to weird behavior - // hence, to be safe, zero it - THTensor_(zero)(fgradInput); - THTensor *tweight = THTensor_(new)(); - THTensor_(transpose)(tweight, weight, 0, 1); - - if (input->nDimension == 4) - { - THNN_(VolumetricConvolutionMM_updateGradInput_frame)( - gradInput, gradOutput, tweight, fgradInput, - kT, kW, kH, - dT, dW, dH, - pT, pW, pH - ); - } - else - { - long T = input->size[0]; - long t; - -//#pragma omp parallel for private(t) - for (t = 0; t < T; 
t++) - { - THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t); - THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); - THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t); - - THNN_(VolumetricConvolutionMM_updateGradInput_frame)( - gradInput_t, gradOutput_t, tweight, fgradInput_t, - kT, kW, kH, - dT, dW, dH, - pT, pW, pH - ); - - THTensor_(free)(gradInput_t); - THTensor_(free)(gradOutput_t); - THTensor_(free)(fgradInput_t); - } - } - - THTensor_(free)(tweight); - THTensor_(free)(input); - THTensor_(free)(gradOutput); - if (freeWeight) - THTensor_(free)(weight); -} - -static void THNN_(VolumetricConvolutionMM_accGradParameters_frame)( - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, - real scale) -{ - long i; - THTensor *gradOutput2d = THTensor_(newWithStorage2d)( - gradOutput->storage, gradOutput->storageOffset, - gradOutput->size[0], -1, - gradOutput->size[1]*gradOutput->size[2]*gradOutput->size[3], -1 - ); - - THTensor *tfinput = THTensor_(new)(); - THTensor_(transpose)(tfinput, finput, 0, 1); - THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput); - THTensor_(free)(tfinput); - - if (gradBias) { - for (i = 0; i < gradBias->size[0]; i++) - { - long k; - real sum = 0; - real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0]; - for (k = 0; k < gradOutput2d->size[1]; k++) - sum += data[k]; - - (gradBias->storage->data + gradBias->storageOffset)[i] += scale * sum; - } - } - - THTensor_(free)(gradOutput2d); -} - -void THNN_(VolumetricConvolutionMM_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, - int kT, int kW, int kH, - int dT, int dW, int dH, - int pT, int pW, int pH, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - int freeWeight; - int nOutputPlane = (int)gradWeight->size[0]; - - THNN_(VolumetricConvolutionMM_shapeCheck)( - state, input, gradOutput, gradWeight, gradBias, - kT, kW, kH, dT, dW, dH, pT, pW, pH); - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - - freeWeight = THNN_(view_weight)(&gradWeight); - - if (input->nDimension == 4) // non-batch mode - { - THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale); - } - else // batch mode - { - long T = input->size[0]; - long t; - - for (t = 0; t < T; t++) - { - THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); - THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); - - THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale); - - THTensor_(free)(gradOutput_t); - THTensor_(free)(finput_t); - } - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); - if (freeWeight) - THTensor_(free)(gradWeight); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricDilatedConvolution.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricDilatedConvolution.c deleted file mode 100644 index ca740f78e3..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricDilatedConvolution.c +++ /dev/null @@ -1,420 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricDilatedConvolution.c" -#else - -static inline void THNN_(VolumetricDilatedConvolution_shapeCheck)( - THTensor *input, THTensor *gradOutput, - THTensor *weight, THTensor *bias, - int kT, int kH, int kW, int dT, int dH, int 
dW, - int padT, int padH, int padW, - int dilationT, int dilationH, int dilationW) { - THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input, - "4D or 5D (batch mode) tensor expected for input, but got: %s"); - THNN_ARGCHECK(weight->nDimension == 5, 4, weight, - "5D (nOutputPlane x nInputPlane x kT x kH x kW) tensor " - "expected for weight, but got: %s"); - THArgCheck(kT > 0 && kW > 0 && kH > 0, 8, - "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", kT, kH, kW); - THArgCheck(dT > 0 && dW > 0 && dH > 0, 11, - "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW); - THArgCheck(dilationT > 0 && dilationW > 0 && dilationH > 0, 15, - "dilation should be greater than zero, but got dilationT: %d, dilationH: %d, dilationW: %d", - dilationT, dilationH, dilationW); - if (bias != NULL) { - THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]); - } - - // Params - int ndim = input->nDimension; - int nInputPlane = weight->size[1]; - int nOutputPlane = weight->size[0]; - int dimf = 0; - int dimd = 1; - int dimh = 2; - int dimw = 3; - - if (ndim == 5) { - dimf++; - dimd++; - dimh++; - dimw++; - } - - long inputDepth = input->size[dimd]; - long inputHeight = input->size[dimh]; - long inputWidth = input->size[dimw]; - long outputDepth = (inputDepth + 2*padT - (dilationT * (kT - 1) + 1)) / dT + 1; - long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; - long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; - - if (outputDepth < 1 || outputWidth < 1 || outputHeight < 1) - THError("Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). Output size is too small", - nInputPlane,inputDepth,inputHeight,inputWidth,nOutputPlane,outputDepth,outputHeight,outputWidth); - - THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimd, outputDepth); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth); - } -} - -void THNN_(VolumetricDilatedConvolution_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THTensor *weight, - THTensor *bias, - THTensor *columns, - THTensor *ones, - int kT, int kW, int kH, - int dT, int dW, int dH, - int padT, int padW, int padH, - int dilationT, int dilationW, int dilationH) -{ - THNN_(VolumetricDilatedConvolution_shapeCheck)( - input, NULL, weight, bias, - kT, kH, kW, dT, dH, dW, padT, padH, padW, - dilationT, dilationH, dilationW); - - // Params: - int nInputPlane = weight->size[1]; - int nOutputPlane = weight->size[0]; - - input = THTensor_(newContiguous)(input); - weight = THTensor_(newContiguous)(weight); - bias = bias ? 
THTensor_(newContiguous)(bias) : bias; - int batch = 1; - if (input->nDimension == 4) { - // Force batch - batch = 0; - THTensor_(resize5d)(input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); - } - - long inputDepth = input->size[2]; - long inputHeight = input->size[3]; - long inputWidth = input->size[4]; - long outputDepth = (inputDepth + 2*padT - (dilationT * (kT - 1) + 1)) / dT + 1; - long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; - long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; - - // Batch size + input planes - long batchSize = input->size[0]; - - // Resize output - THTensor_(resize5d)(output, batchSize, nOutputPlane, outputDepth, outputHeight, outputWidth); - THTensor_(zero)(output); - - // Resize temporary columns - THTensor_(resize2d)(columns, nInputPlane*kT*kW*kH, outputDepth*outputHeight*outputWidth); - - // Define a buffer of ones, for bias accumulation - // Note: this buffer can be shared with other modules, it only ever gets increased, - // and always contains ones. - if (ones->nDimension != 3 || - ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) { - // Resize plane and fill with ones... - THTensor_(resize3d)(ones, outputDepth, outputHeight, outputWidth); - THTensor_(fill)(ones, 1); - } - - // Helpers - THTensor *input_n = THTensor_(new)(); - THTensor *output_n = THTensor_(new)(); - - // For each elt in batch, do: - for (int elt = 0; elt < batchSize; elt ++) { - // Matrix mulitply per output: - THTensor_(select)(input_n, input, 0, elt); - THTensor_(select)(output_n, output, 0, elt); - - // Do Bias first: - // M,N,K are dims of matrix A and B - long m_ = nOutputPlane; - long n_ = outputDepth * outputHeight * outputWidth; - long k_ = 1; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - if (bias) { - THBlas_(gemm)( - 't', 'n', - n_, m_, k_, - 1, - THTensor_(data)(ones), k_, - THTensor_(data)(bias), k_, - 0, - THTensor_(data)(output_n), n_ - ); - } else { - THTensor_(zero)(output_n); - } - - // Extract columns: - THNN_(vol2col)( - THTensor_(data)(input_n), - nInputPlane, inputDepth, inputHeight, inputWidth, - kT, kH, kW, padT, padH, padW, dT, dH, dW, - dilationT, dilationH, dilationW, - THTensor_(data)(columns) - ); - - // M,N,K are dims of matrix A and B - long m = nOutputPlane; - long n = columns->size[1]; - long k = nInputPlane*kT*kH*kW; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 'n', 'n', - n, m, k, - 1, - THTensor_(data)(columns), n, - THTensor_(data)(weight), k, - 1, - THTensor_(data)(output_n), n - ); - } - - // Free - THTensor_(free)(input_n); - THTensor_(free)(output_n); - - // Resize output - if (batch == 0) { - THTensor_(resize4d)(output, nOutputPlane, outputDepth, outputHeight, outputWidth); - THTensor_(resize4d)(input, nInputPlane, inputDepth, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(weight); - if (bias) THTensor_(free)(bias); -} - -void THNN_(VolumetricDilatedConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *gradColumns, - int kT, int kW, int kH, - int dT, int dW, int dH, - int padT, int padW, int padH, - int dilationT, int dilationW, int dilationH) -{ - THNN_(VolumetricDilatedConvolution_shapeCheck)( - input, gradOutput, weight, NULL, - kT, kH, kW, dT, dH, dW, padT, padH, padW, - dilationT, dilationH, 
dilationW); - - // Params - int nInputPlane = weight->size[1]; - int nOutputPlane = weight->size[0]; - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - weight = THTensor_(newContiguous)(weight); - - int batch = 1; - if (input->nDimension == 4) { - // Force batch - batch = 0; - THTensor_(resize5d)(input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); - THTensor_(resize5d)(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); - } - - long inputDepth = input->size[2]; - long inputWidth = input->size[4]; - long inputHeight = input->size[3]; - long outputDepth = (inputDepth + 2*padT - (dilationT * (kT - 1) + 1)) / dT + 1; - long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; - long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; - - // Batch size + input planes - long batchSize = input->size[0]; - - // Resize output - THTensor_(resize5d)(gradInput, batchSize, nInputPlane, inputDepth, inputHeight, inputWidth); - - // Resize temporary columns - THTensor_(resize2d)(gradColumns, nInputPlane*kT*kW*kH, outputDepth*outputHeight*outputWidth); - THTensor_(zero)(gradColumns); - - // Helpers - THTensor *gradInput_n = THTensor_(new)(); - THTensor *gradOutput_n = THTensor_(new)(); - - // For each elt in batch, do: - for (int elt = 0; elt < batchSize; elt ++) { - // Matrix mulitply per sample: - THTensor_(select)(gradInput_n, gradInput, 0, elt); - THTensor_(select)(gradOutput_n, gradOutput, 0, elt); - - // M,N,K are dims of matrix A and B - long m = nInputPlane*kT*kW*kH; - long n = gradColumns->size[1]; - long k = nOutputPlane; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 'n', 't', - n, m, k, - 1, - THTensor_(data)(gradOutput_n), n, - THTensor_(data)(weight), m, - 0, - THTensor_(data)(gradColumns), n - ); - - // Unpack columns back into input: - THNN_(col2vol)( - THTensor_(data)(gradColumns), - nInputPlane, inputDepth, inputHeight, inputWidth, - kT, kH, kW, padT, padH, padW, dT, dH, dW, - dilationT, dilationH, dilationW, - THTensor_(data)(gradInput_n) - ); - } - - // Free - THTensor_(free)(gradInput_n); - THTensor_(free)(gradOutput_n); - - // Resize output - if (batch == 0) { - THTensor_(resize4d)(gradOutput, nOutputPlane, outputDepth, outputHeight, outputWidth); - THTensor_(resize4d)(input, nInputPlane, inputDepth, inputHeight, inputWidth); - THTensor_(resize4d)(gradInput, nInputPlane, inputDepth, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(weight); -} - -void THNN_(VolumetricDilatedConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *columns, - THTensor *ones, - int kT, int kW, int kH, - int dT, int dW, int dH, - int padT, int padW, int padH, - int dilationT, int dilationW, int dilationH, - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - THNN_(VolumetricDilatedConvolution_shapeCheck)( - input, gradOutput, gradWeight, gradBias, - kT, kH, kW, dT, dH, dW, padT, padH, padW, - dilationT, dilationH, dilationW); - - // Params - int nInputPlane = gradWeight->size[1]; - int nOutputPlane = gradWeight->size[0]; - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - - int batch = 1; - if (input->nDimension == 4) { - // Force batch - batch = 0; - 
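/*
 * As in updateOutput and updateGradInput above, an unbatched 4D input is
 * promoted in place to 5D with a leading size-1 batch axis so that the
 * per-sample loop below serves both cases; batch == 0 records that the
 * extra axis must be stripped again before returning.
 */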
THTensor_(resize5d)(input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); - THTensor_(resize5d)(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); - } - - long inputDepth = input->size[2]; - long inputWidth = input->size[4]; - long inputHeight = input->size[3]; - long outputDepth = (inputDepth + 2*padT - (dilationT * (kT - 1) + 1)) / dT + 1; - long outputWidth = (inputWidth + 2*padW - (dilationW * (kW - 1) + 1)) / dW + 1; - long outputHeight = (inputHeight + 2*padH - (dilationH * (kH - 1) + 1)) / dH + 1; - - // Batch size + input planes - long batchSize = input->size[0]; - - // Define a buffer of ones, for bias accumulation - if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) { - // Resize plane and fill with ones... - THTensor_(resize3d)(ones, outputDepth, outputHeight, outputWidth); - THTensor_(fill)(ones, 1); - } - - // Resize temporary columns - THTensor_(resize2d)(columns, nInputPlane*kT*kW*kH, outputDepth*outputHeight*outputWidth); - - // Helpers - THTensor *input_n = THTensor_(new)(); - THTensor *gradOutput_n = THTensor_(new)(); - - // For each elt in batch, do: - for (int elt = 0; elt < batchSize; elt ++) { - // Matrix mulitply per output: - THTensor_(select)(input_n, input, 0, elt); - THTensor_(select)(gradOutput_n, gradOutput, 0, elt); - - // Extract columns: - THNN_(vol2col)( - THTensor_(data)(input_n), - nInputPlane, inputDepth, inputHeight, inputWidth, - kT, kH, kW, padT, padH, padW, dT, dH, dW, - dilationT, dilationH, dilationW, - THTensor_(data)(columns) - ); - - // M,N,K are dims of matrix A and B - long m = nOutputPlane; - long n = nInputPlane*kT*kW*kH; - long k = columns->size[1]; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 't', 'n', - n, m, k, - scale, - THTensor_(data)(columns), k, - THTensor_(data)(gradOutput_n), k, - 1, - THTensor_(data)(gradWeight), n - ); - - // Do Bias: - // M,N,K are dims of matrix A and B - long m_ = nOutputPlane; - long k_ = outputDepth * outputHeight * outputWidth; - - // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) - if (gradBias) { - THBlas_(gemv)( - 't', - k_, m_, - scale, - THTensor_(data)(gradOutput_n), k_, - THTensor_(data)(ones), 1, - 1, - THTensor_(data)(gradBias), 1 - ); - } - } - - // Free - THTensor_(free)(input_n); - THTensor_(free)(gradOutput_n); - - // Resize - if (batch == 0) { - THTensor_(resize4d)(gradOutput, nOutputPlane, outputDepth, outputHeight, outputWidth); - THTensor_(resize4d)(input, nInputPlane, inputDepth, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricDilatedMaxPooling.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricDilatedMaxPooling.c deleted file mode 100644 index 66c0f9531f..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricDilatedMaxPooling.c +++ /dev/null @@ -1,515 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricDilatedMaxPooling.c" -#else - -static inline void THNN_(VolumetricDilatedMaxPooling_shapeCheck)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THIndexTensor *indices, - int kT, int kW, int kH, - int dT, int dW, int dH, - int pT, int pW, int pH, - int dilationT, int dilationW, int dilationH, - bool ceilMode) { - int ndim = input->nDimension; - int dimN = 0; - int dimt = 1; - int dimh = 2; - int 
dimw = 3; - long nslices; - long itime; - long iheight; - long iwidth; - long otime; - long oheight; - long owidth; - - THArgCheck(kT > 0 && kW > 0 && kH > 0, 5, - "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", - kT, kH, kW); - THArgCheck(dT > 0 && dW > 0 && dH > 0, 8, - "stride should be greater than zero, but got dT: %d dH: %d dW: %d", - dT, dH, dW); - THArgCheck(dilationT > 0 && dilationW > 0 && dilationH > 0, 14, - "dilation should be greater than 0, but got dilationT: %d dilationH: %d dilationW: %d", - dilationT, dilationH, dilationW); - - THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input, - "4D or 5D (batch mode) tensor expected for input, but got: %s"); - - if (input->nDimension == 5) - { - dimN++; - dimt++; - dimh++; - dimw++; - } - - THArgCheck(kT/2 >= pT && kW/2 >= pW && kH/2 >= pH, 2, - "pad should be smaller than half of kernel size, but got " - "kT: %d kW: %d, kH: %d, padT: %d, padW: %d, padH: %d", - kT, kW, kH, pT, pW, pH); - - nslices = input->size[dimN]; - itime = input->size[dimt]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - if (ceilMode) - { - otime = (int)(ceil((float)(itime - (dilationT * (kT - 1) + 1) + 2*pT) / dT)) + 1; - oheight = (int)(ceil((float)(iheight - (dilationH * (kH - 1) + 1) + 2*pH) / dH)) + 1; - owidth = (int)(ceil((float)(iwidth - (dilationW * (kW - 1) + 1) + 2*pW) / dW)) + 1; - } - else - { - otime = (int)(floor((float)(itime - (dilationT * (kT - 1) + 1) + 2*pT) / dT)) + 1; - oheight = (int)(floor((float)(iheight - (dilationH * (kH - 1) + 1) + 2*pH) / dH)) + 1; - owidth = (int)(floor((float)(iwidth - (dilationW * (kW - 1) + 1) + 2*pW) / dW)) + 1; - } - - if (pT || pW || pH) - { - // ensure that the last pooling starts inside the image - if ((otime - 1)*dT >= itime + pT) - --otime; - if ((oheight - 1)*dH >= iheight + pH) - --oheight; - if ((owidth - 1)*dW >= iwidth + pW) - --owidth; - } - - if (otime < 1 || owidth < 1 || oheight < 1) - THError("Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). 
Output size is too small", - nslices,itime,iheight,iwidth,nslices,otime,oheight,owidth); - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth); - } - if (indices != NULL) { - THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimN, nslices); - THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimt, otime); - THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimh, oheight); - THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimw, owidth); - } -} - -static void THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)( - real *input_p, - real *output_p, - THIndex_t *indz_p, - long nslices, - long itime, - long iwidth, - long iheight, - long otime, - long owidth, - long oheight, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH, - int dilationT, - int dilationW, - int dilationH) -{ - long k; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - /* loop over output */ - long i, j, ti; - for (ti = 0; ti < otime; ti++) - { - for (i = 0; i < oheight; i++) - { - for (j = 0; j < owidth; j++) - { - /* local pointers */ - - long start_t = ti * dT - pT; - long start_h = i * dH - pH; - long start_w = j * dW - pW; - - long kernel_t = fminf(kT, kT + start_t); - long kernel_h = fminf(kH, kH + start_h); - long kernel_w = fminf(kW, kW + start_w); - - while(start_t < 0) - start_t += dilationT; - while(start_h < 0) - start_h += dilationH; - while(start_w < 0) - start_w += dilationW; - - real *ip = input_p + k * itime * iwidth * iheight - + start_t * iwidth * iheight + start_h * iwidth + start_w; - real *op = output_p + k * otime * owidth * oheight - + ti * owidth * oheight + i * owidth + j; - THIndex_t *indzp = indz_p + k * otime * owidth * oheight - + ti * owidth * oheight + i * owidth + j; - - /* compute local max: */ - real maxval = -THInf; - int x,y,z; - int mx, my, mz; - mx = my = mz = -1; - - for (z = 0; z < kernel_t; z++) - { - for (y = 0; y < kernel_h; y++) - { - for (x = 0; x < kernel_w; x++) - { - if ((start_t + z * dilationT < itime) && (start_h + y * dilationH < iheight) && (start_w + x * dilationW < iwidth)) - { - real val = *(ip + z * dilationT * iwidth * iheight + y * dilationH * iwidth + x * dilationW); - if (val > maxval) - { - maxval = val; - // Store indices w.r.t the kernel dimension - mz = z + (kT - kernel_t); - my = y + (kH - kernel_h); - mx = x + (kW - kernel_w); - } - } - } - } - } - - // set max values - ((unsigned char*)(indzp))[0] = mz; - ((unsigned char*)(indzp))[1] = my; - ((unsigned char*)(indzp))[2] = mx; - ((unsigned char*)(indzp))[3] = 0; - - /* set output to local max */ - *op = maxval; - } - } - } - } -} - -void THNN_(VolumetricDilatedMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH, - int dilationT, - int dilationW, - int dilationH, - bool ceilMode) -{ - long nslices; - long itime; - long iheight; - long iwidth; - long otime; - long oheight; - long owidth; - real *input_data; - real *output_data; - THIndex_t *indices_data; - - - int dimN = 0; - int dimt = 1; - int dimh = 2; - int dimw = 3; - - if (input->nDimension == 5) - { - dimN++; - dimt++; - dimh++; - dimw++; - } - - THNN_(VolumetricDilatedMaxPooling_shapeCheck)( - state, input, NULL, NULL, - kT, kW, kH, dT, dW, dH, - pT, pW, pH, dilationT, dilationW, 
dilationH, - ceilMode); - - /* sizes */ - nslices = input->size[dimN]; - itime = input->size[dimt]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - if (ceilMode) - { - otime = (int)(ceil((float)(itime - (dilationT * (kT - 1) + 1) + 2*pT) / dT)) + 1; - oheight = (int)(ceil((float)(iheight - (dilationH * (kH - 1) + 1) + 2*pH) / dH)) + 1; - owidth = (int)(ceil((float)(iwidth - (dilationW * (kW - 1) + 1) + 2*pW) / dW)) + 1; - } - else - { - otime = (int)(floor((float)(itime - (dilationT * (kT - 1) + 1) + 2*pT) / dT)) + 1; - oheight = (int)(floor((float)(iheight - (dilationH * (kH - 1) + 1) + 2*pH) / dH)) + 1; - owidth = (int)(floor((float)(iwidth - (dilationW * (kW - 1) + 1) + 2*pW) / dW)) + 1; - } - - if (pT || pW || pH) - { - // ensure that the last pooling starts inside the image - if ((otime - 1)*dT >= itime + pT) - --otime; - if ((oheight - 1)*dH >= iheight + pH) - --oheight; - if ((owidth - 1)*dW >= iwidth + pW) - --owidth; - } - - /* get contiguous input */ - input = THTensor_(newContiguous)(input); - - if (input->nDimension == 4) /* non-batch mode */ - { - /* resize output */ - THTensor_(resize4d)(output, nslices, otime, oheight, owidth); - /* indices will contain ti,i,j uchar locations packed into float/double */ - THIndexTensor_(resize4d)(indices, nslices, otime, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - - THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)( - input_data, output_data, - indices_data, - nslices, - itime, iwidth, iheight, - otime, owidth, oheight, - kT, kW, kH, - dT, dW, dH, - pT, pW, pH, - dilationT, dilationW, dilationH - ); - } - else /* batch mode */ - { - long p; - long nBatch = input->size[0]; - - long istride = nslices * itime * iwidth * iheight; - long ostride = nslices * otime * owidth * oheight; - - /* resize output */ - THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth); - /* indices will contain ti,i,j locations for each output point */ - THIndexTensor_(resize5d)(indices, nBatch, nslices, otime, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - -#pragma omp parallel for private(p) - for (p=0; p < nBatch; p++) - { - THNN_(VolumetricDilatedMaxPooling_updateOutput_frame)( - input_data + p * istride, - output_data + p * ostride, - indices_data + p * ostride, - nslices, - itime, iwidth, iheight, - otime, owidth, oheight, - kT, kW, kH, - dT, dW, dH, - pT, pW, pH, - dilationT, dilationW, dilationH - ); - } - } - - /* cleanup */ - THTensor_(free)(input); -} - -static void THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)( - real *gradInput_p, - real *gradOutput_p, - THIndex_t *indz_p, - long nslices, - long itime, - long iwidth, - long iheight, - long otime, - long owidth, - long oheight, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH, - int dilationT, - int dilationW, - int dilationH) -{ - long k; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - real *gradInput_p_k = gradInput_p + k * itime * iwidth * iheight; - real *gradOutput_p_k = gradOutput_p + k * otime * owidth * oheight; - THIndex_t *indz_p_k = indz_p + k * otime * owidth * oheight; - - /* calculate max points */ - long ti, i, j; - for (ti = 0; ti < otime; ti++) - { - for (i = 0; i < oheight; i++) - { - for (j = 0; j < owidth; j++) - { - /* retrieve position of max */ - THIndex_t * indzp = &indz_p_k[ti * oheight * owidth + 
i * owidth + j]; - long maxti = ((unsigned char*)(indzp))[0] * dilationT + ti * dT - pT; - long maxi = ((unsigned char*)(indzp))[1] * dilationH + i * dH - pH; - long maxj = ((unsigned char*)(indzp))[2] * dilationW + j * dW - pW; - - if (maxti != -1) { - /* update gradient */ - gradInput_p_k[maxti * iheight * iwidth + maxi * iwidth + maxj] += - gradOutput_p_k[ti * oheight * owidth + i * owidth + j]; - } - } - } - } - } -} - -void THNN_(VolumetricDilatedMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH, - int dilationT, - int dilationW, - int dilationH, - bool ceilMode) -{ - int nslices; - int itime; - int iheight; - int iwidth; - int otime; - int oheight; - int owidth; - real *gradInput_data; - real *gradOutput_data; - THIndex_t *indices_data; - - int dimN = 0; - int dimt = 1; - int dimh = 2; - int dimw = 3; - - THNN_(VolumetricDilatedMaxPooling_shapeCheck)( - state, input, gradOutput, indices, - kT, kW, kH, dT, dW, dH, - pT, pW, pH, dilationT, dilationW, dilationH, - ceilMode); - - // TODO: gradOutput shape check - /* get contiguous gradOutput */ - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - if (input->nDimension == 5) - { - dimN++; - dimt++; - dimh++; - dimw++; - } - - /* sizes */ - nslices = input->size[dimN]; - itime = input->size[dimt]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - otime = gradOutput->size[dimt]; - oheight = gradOutput->size[dimh]; - owidth = gradOutput->size[dimw]; - - /* get raw pointers */ - gradInput_data = THTensor_(data)(gradInput); - gradOutput_data = THTensor_(data)(gradOutput); - indices_data = THIndexTensor_(data)(indices); - - /* backprop */ - if (input->nDimension == 4) /* non-batch mode*/ - { - THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)( - gradInput_data, gradOutput_data, - indices_data, - nslices, - itime, iwidth, iheight, - otime, owidth, oheight, - dT, dW, dH, - pT, pW, pH, - dilationT, dilationW, dilationH - ); - } - else /* batch mode */ - { - long p; - long nBatch = input->size[0]; - - long istride = nslices * itime * iwidth * iheight; - long ostride = nslices * otime * owidth * oheight; - -#pragma omp parallel for private(p) - for (p = 0; p < nBatch; p++) - { - THNN_(VolumetricDilatedMaxPooling_updateGradInput_frame)( - gradInput_data + p * istride, - gradOutput_data + p * ostride, - indices_data + p * ostride, - nslices, - itime, iwidth, iheight, - otime, owidth, oheight, - dT, dW, dH, - pT, pW, pH, - dilationT, dilationW, dilationH - ); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricFractionalMaxPooling.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricFractionalMaxPooling.c deleted file mode 100644 index 236986bb97..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricFractionalMaxPooling.c +++ /dev/null @@ -1,279 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricFractionalMaxPooling.c" -#else - -static long* THNN_(VolumetricFractionalMaxPooling_generateIntervals)( - real sample, - long inputSize, - long outputSize, - int poolSize) { - real alpha = (real) (inputSize - poolSize) / (real) (outputSize - 1); - long* sequence = (long*) THAlloc(sizeof(long) * outputSize); - - long i; - for (i = 0; i < outputSize - 1; ++i) { - 
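/*
 * Pseudorandom interval generation for fractional max-pooling: with
 * alpha = (inputSize - poolSize)/(outputSize - 1) and one uniform sample
 * u per plane, start[i] = floor((i + u)*alpha) - floor(u*alpha).
 * Consecutive starts then differ by floor(alpha) or ceil(alpha), and the
 * final window is pinned to inputSize - poolSize so every pool stays in
 * bounds.
 */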
sequence[i] = - (long) ((i + sample) * alpha) - (long) (sample * alpha); - } - sequence[outputSize - 1] = inputSize - poolSize; - - return sequence; -} - -static void THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)( - real* input, - real* output, - THIndex_t* indices, - real* randomSamples, - long numPlanes, - long inputT, long inputW, long inputH, - long outputT, long outputW, long outputH, - int poolSizeT, int poolSizeW, int poolSizeH) { - long plane; -#pragma omp parallel for private(plane) - for (plane = 0; plane < numPlanes; ++plane) { - /* each plane contains 3 random samples, one for T, one for W, and one for H */ - real* randomSamplesForPlane = randomSamples + plane * 3; - - /* Generate interval sequence */ - long* sequenceT = - THNN_(VolumetricFractionalMaxPooling_generateIntervals)( - randomSamplesForPlane[0], inputT, outputT, poolSizeT); - long* sequenceW = - THNN_(VolumetricFractionalMaxPooling_generateIntervals)( - randomSamplesForPlane[1], inputW, outputW, poolSizeW); - long* sequenceH = - THNN_(VolumetricFractionalMaxPooling_generateIntervals)( - randomSamplesForPlane[2], inputH, outputH, poolSizeH); - - /* loop over output */ - long h, w, t; - - real* inputForPlane = input + plane * inputT * inputW * inputH; - real* outputForPlane = output + plane * outputT * outputW * outputH; - THIndex_t* indicesForPlane = indices + plane * outputT * outputW * outputH; - - for (h = 0; h < outputH; ++h) { - long inputHStart = sequenceH[h]; - - for (w = 0; w < outputW; ++w) { - long inputWStart = sequenceW[w]; - - for (t = 0; t < outputT; ++t) { - long inputTStart = sequenceT[t]; - - real maxVal = -THInf; - long maxIndex = -1; - - long h2, w2, t2; - for (h2 = inputHStart; h2 < inputHStart + poolSizeH; ++h2) { - for (w2 = inputWStart; w2 < inputWStart + poolSizeW; ++w2) { - for (t2 = inputTStart; t2 < inputTStart + poolSizeT; ++t2) { - THAssert(h2 >= 0 && h2 < inputH); - THAssert(w2 >= 0 && w2 < inputW); - THAssert(t2 >= 0 && t2 < inputT); - - long planeIndex = h2 * inputW * inputT + w2 * inputT + t2; - real val = inputForPlane[planeIndex]; - if (val > maxVal) { - maxVal = val; - maxIndex = planeIndex; - } - } - } - } - - THAssert(maxVal != -THInf); - THAssert(maxIndex != -1); - - outputForPlane[h * outputW * outputT + w * outputT + t] = maxVal; - /* +1 to lua index */ - indicesForPlane[h * outputW * outputT + w * outputT + t] = maxIndex + TH_INDEX_BASE; - } - } - } - - THFree(sequenceT); - THFree(sequenceW); - THFree(sequenceH); - } -} - -void THNN_(VolumetricFractionalMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int outputT, int outputW, int outputH, - int poolSizeT, int poolSizeW, int poolSizeH, - THIndexTensor *indices, - THTensor *randomSamples) { - - long numBatch = 1; - int planeDim = 0; - int heightDim = 1; - int widthDim = 2; - int timeDim = 3; - - long numInputDims = THTensor_(nDimension)(input); - THNN_ARGCHECK(numInputDims == 4 || numInputDims == 5, 2, input, - "4D or 5D (batch mode) tensor expected for input, but got: %s"); - - if (numInputDims == 5) { - numBatch = THTensor_(size)(input, 0); - planeDim++; - heightDim++; - widthDim++; - timeDim++; - } - - /* sizes */ - long numPlanes = THTensor_(size)(input, planeDim); - long inputH = THTensor_(size)(input, heightDim); - long inputW = THTensor_(size)(input, widthDim); - long inputT = THTensor_(size)(input, timeDim); - - THArgCheck(outputH + poolSizeH - 1 < inputH, 9, - "poolSizeH (%d) too large relative to input height (%d)", - poolSizeH, inputH); - THArgCheck(outputW + poolSizeW 
- 1 < inputW, 8, - "poolSizeW (%d) too large relative to input width (%d)", - poolSizeW, inputW); - THArgCheck(outputT + poolSizeT - 1 < inputT, 7, - "poolSizeT (%d) too large relative to input time (%d)", - poolSizeT, inputT); - - /* get contiguous input */ - input = THTensor_(newContiguous)(input); - - if (numInputDims == 4) { - /* resize output */ - THTensor_(resize4d)(output, numPlanes, outputH, outputW, outputT); - /* indices will contain the locations for each output point */ - THIndexTensor_(resize4d)(indices, numPlanes, outputH, outputW, outputT); - - THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)( - THTensor_(data)(input), - THTensor_(data)(output), - THIndexTensor_(data)(indices), - THTensor_(data)(randomSamples), - numPlanes, inputT, inputW, inputH, - outputT, outputW, outputH, poolSizeT, poolSizeW, poolSizeH); - } else { - THTensor_(resize5d)(output, numBatch, numPlanes, outputH, outputW, outputT); - /* indices will contain the locations for each output point */ - THIndexTensor_(resize5d)(indices, numBatch, numPlanes, outputH, outputW, outputT); - - long batch; -#pragma omp parallel for private(batch) - for (batch = 0; batch < numBatch; ++batch) { - THNN_(VolumetricFractionalMaxPooling_updateOutput_frame)( - THTensor_(data)(input) + batch * numPlanes * inputH * inputW * inputT, - THTensor_(data)(output) + batch * numPlanes * outputH * outputW * outputT, - THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW * outputT, - THTensor_(data)(randomSamples) + batch * numPlanes * 3, - numPlanes, inputT, inputW, inputH, - outputT, outputW, outputH, poolSizeT, poolSizeW, poolSizeH); - } - } - - /* cleanup */ - THTensor_(free)(input); -} - -static void THNN_(VolumetricFractionalMaxPooling_updateGradInput_frame)( - real* gradInput, - real* gradOutput, - THIndex_t* indices, - long numPlanes, - long inputT, long inputW, long inputH, - long outputT, long outputW, long outputH) { - long plane; -#pragma omp parallel for private(plane) - for (plane = 0; plane < numPlanes; plane++) { - real* gradInputForPlane = gradInput + plane * inputT * inputW * inputH; - real* gradOutputForPlane = gradOutput + plane * outputT * outputW * outputH; - THIndex_t* indicesForPlane = indices + plane * outputT * outputW * outputH; - - long h, w, t; - for (h = 0; h < outputH; ++h) { - for (w = 0; w < outputW; ++w) { - for (t = 0; t < outputT; ++t) { - long outputIndex = h * outputW * outputT + w * outputT + t; - long index = indicesForPlane[outputIndex] - TH_INDEX_BASE; - THAssert(index >= 0 && index < inputT * inputW * inputH); - - gradInputForPlane[index] += gradOutputForPlane[outputIndex]; - } - } - } - } -} - -void THNN_(VolumetricFractionalMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int outputT, int outputW, int outputH, - int poolSizeT, int poolSizeW, int poolSizeH, - THIndexTensor *indices) { - - long numBatch = 1; - int planeDim = 0; - int heightDim = 1; - int widthDim = 2; - int timeDim = 3; - - long numInputDims = THTensor_(nDimension)(input); - if (numInputDims == 5) { - numBatch = THTensor_(size)(input, 0); - planeDim = 1; - heightDim++; - widthDim++; - timeDim++; - } - - /* sizes */ - long numPlanes = THTensor_(size)(input, planeDim); - long inputH = THTensor_(size)(input, heightDim); - long inputW = THTensor_(size)(input, widthDim); - long inputT = THTensor_(size)(input, timeDim); - - THArgCheck(outputT == THTensor_(size)(gradOutput, timeDim), 3, - "gradOutput time unexpected"); - THArgCheck(outputW == 
THTensor_(size)(gradOutput, widthDim), 3, - "gradOutput width unexpected"); - THArgCheck(outputH == THTensor_(size)(gradOutput, heightDim), 3, - "gradOutput height unexpected"); - - /* get contiguous gradOutput */ - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - /* backprop */ - if (numInputDims == 4) { - THNN_(VolumetricFractionalMaxPooling_updateGradInput_frame)( - THTensor_(data)(gradInput), - THTensor_(data)(gradOutput), - THIndexTensor_(data)(indices), - numPlanes, inputT, inputW, inputH, outputT, outputW, outputH); - } else { - long batch; -#pragma omp parallel for private(batch) - for (batch = 0; batch < numBatch; ++batch) { - THNN_(VolumetricFractionalMaxPooling_updateGradInput_frame)( - THTensor_(data)(gradInput) + batch * numPlanes * inputH * inputW * inputT, - THTensor_(data)(gradOutput) + batch * numPlanes * outputH * outputW * outputT, - THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW * outputT, - numPlanes, inputT, inputW, inputH, outputT, outputW, outputH); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricFullConvolution.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricFullConvolution.c deleted file mode 100644 index c974fab501..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricFullConvolution.c +++ /dev/null @@ -1,541 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricFullConvolution.c" -#else - -static void THNN_(vol2col)( - const real *data_vol, const int channels, - const int depth, const int height, const int width, - const int kT, const int kH, const int kW, - const int pT, const int pH, const int pW, - const int dT, const int dH, const int dW, - const int dilationT, const int dilationH, const int dilationW, - real *data_col) -{ - int c, t, h, w; - int depth_col = (depth + 2 * pT - (dilationT * (kT - 1) + 1)) / dT + 1; - int height_col = (height + 2 * pH - (dilationH * (kH - 1) + 1)) / dH + 1; - int width_col = (width + 2 * pW - (dilationW * (kW - 1) + 1)) / dW + 1; - int channels_col = channels * kT * kH * kW; - for (c = 0; c < channels_col; ++c) - { - int w_offset = c % kW; - int h_offset = (c / kW) % kH; - int t_offset = (c / kW / kH) % kT; - int c_vol = c / kT / kH / kW; - for (t = 0; t < depth_col; ++t) - { - for (h = 0; h < height_col; ++h) - { - for (w = 0; w < width_col; ++w) - { - int t_pad = t * dT - pT + t_offset * dilationT; - int h_pad = h * dH - pH + h_offset * dilationH; - int w_pad = w * dW - pW + w_offset * dilationW; - if (t_pad >= 0 && t_pad < depth && - h_pad >= 0 && h_pad < height && - w_pad >= 0 && w_pad < width) - data_col[((c * depth_col + t) * height_col + h) * width_col + w] = - data_vol[((c_vol * depth + t_pad) * height + h_pad) * width + w_pad]; - else - data_col[((c * depth_col + t) * height_col + h) * width_col + w] = 0; - } - } - } - } -} - -static void THNN_(col2vol)( - const real* data_col, const int channels, - const int depth, const int height, const int width, - const int kT, const int kH, const int kW, - const int pT, const int pH, const int pW, - const int dT, const int dH, const int dW, - const int dilationT, const int dilationH, const int dilationW, - real* data_vol) -{ - int c, t, h, w; - memset(data_vol, 0, sizeof(real) * depth * height * width * channels); - int depth_col = (depth + 2 * pT - (dilationT * (kT - 1) + 1)) / dT + 1; - int height_col = (height + 2 * pH - (dilationH * (kH - 1) 
+ 1)) / dH + 1; - int width_col = (width + 2 * pW - (dilationW * (kW - 1) + 1)) / dW + 1; - int channels_col = channels * kT * kH * kW; - for (c = 0; c < channels_col; ++c) - { - int w_offset = c % kW; - int h_offset = (c / kW) % kH; - int t_offset = (c / kW / kH) % kT; - int c_vol = c / kT / kH / kW; - for (t = 0; t < depth_col; ++t) - { - for (h = 0; h < height_col; ++h) - { - for (w = 0; w < width_col; ++w) - { - int t_pad = t * dT - pT + t_offset * dilationT; - int h_pad = h * dH - pH + h_offset * dilationH; - int w_pad = w * dW - pW + w_offset * dilationW; - if (t_pad >= 0 && t_pad < depth && - h_pad >= 0 && h_pad < height && - w_pad >= 0 && w_pad < width) - data_vol[((c_vol * depth + t_pad) * height + h_pad) * width + w_pad] += - data_col[((c * depth_col + t) * height_col + h) * width_col + w]; - } - } - } - } -} - -static inline void THNN_(VolumetricFullConvolution_shapeCheck)( - THTensor *input, THTensor *gradOutput, - THTensor *weight, THTensor *bias, - int dT, int dW, int dH, int pT, int pW, int pH, - int aT, int aW, int aH) { - THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input, - "4D or 5D (batch mode) tensor expected for input, but got: %s"); - // number of input & output planes and kernel size is indirectly defined by the weight tensor - THNN_ARGCHECK(weight->nDimension == 5, 4, weight, - "5D (nOutputPlane x nInputPlane x kT x kH x kW) tensor " - "expected for weight, but got: %s"); - THArgCheck(dT > 0 && dW > 0 && dH > 0, 11, - "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW); - THArgCheck(aT < dT && aW < dW && aH < dH, 15, - "output adjustment must be smaller than stride, but got " - "adjT: %d adjH: %d adjW: %d dT: %d dH: %d dW: %d", - aT, aH, aW, dT, dH, dW); - - int ndim = input->nDimension; - const int nInputPlane = (int)weight->size[0]; - const int nOutputPlane = (int)weight->size[1]; - const int kT = (int)weight->size[2]; - const int kH = (int)weight->size[3]; - const int kW = (int)weight->size[4]; - - if (bias != NULL) { - THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[1]); - } - - int dimf = 0; - int dimd = 1; - int dimh = 2; - int dimw = 3; - - if (ndim == 5) { - dimf++; - dimd++; - dimh++; - dimw++; - } - - const long inputWidth = input->size[dimw]; - const long inputHeight = input->size[dimh]; - const long inputDepth = input->size[dimd]; - const long outputWidth = (inputWidth - 1) * dW - 2*pW + kW + aW; - const long outputHeight = (inputHeight - 1) * dH - 2*pH + kH + aH; - const long outputDepth = (inputDepth - 1) * dT - 2*pT + kT + aT; - - if (outputDepth < 1 || outputWidth < 1 || outputHeight < 1) - THError("Given input size: (%dx%dx%dx%d). Calculated output size: (%dx%dx%dx%d). 
Output size is too small", - nInputPlane,inputDepth,inputHeight,inputWidth,nOutputPlane,outputDepth,outputHeight,outputWidth); - - THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane); - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimd, outputDepth); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth); - } -} - -void THNN_(VolumetricFullConvolution_updateOutput)( - THNNState *state, - THTensor *input, // 4D or 5D (batch) tensor - THTensor *output, - THTensor *weight, // weight tensor (nInputPlane x nOutputPlane x kT x kH x kW) - THTensor *bias, - THTensor *finput, // internal columns buffer - THTensor *fgradInput, // internal ones buffer - int dT, int dW, int dH, // stride of the convolution - int pT, int pW, int pH, // padding - int aT, int aW, int aH) // extra output adjustment -{ - THTensor *columns = finput; - THTensor *ones = fgradInput; - - THNN_(VolumetricFullConvolution_shapeCheck)( - input, NULL, weight, bias, - dT, dW, dH, pT, pW, pH, aT, aW, aH); - - const int nInputPlane = (int)weight->size[0]; - const int nOutputPlane = (int)weight->size[1]; - const int kT = (int)weight->size[2]; - const int kH = (int)weight->size[3]; - const int kW = (int)weight->size[4]; - - input = THTensor_(newContiguous)(input); - weight = THTensor_(newContiguous)(weight); - bias = bias ? THTensor_(newContiguous)(bias) : bias; - int batch = 1; - if (input->nDimension == 4) - { - // Force batch - batch = 0; - THTensor_(resize5d)(input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); - } - - const long inputWidth = input->size[4]; - const long inputHeight = input->size[3]; - const long inputDepth = input->size[2]; - const long outputWidth = (inputWidth - 1) * dW - 2*pW + kW + aW; - const long outputHeight = (inputHeight - 1) * dH - 2*pH + kH + aH; - const long outputDepth = (inputDepth - 1) * dT - 2*pT + kT + aT; - - // Batch size + input planes - const long batchSize = input->size[0]; - - // Resize output - THTensor_(resize5d)(output, batchSize, nOutputPlane, outputDepth, outputHeight, outputWidth); - - // Resize temporary columns - THTensor_(resize2d)(columns, nOutputPlane*kW*kH*kT, inputDepth*inputHeight*inputWidth); - THTensor_(zero)(columns); - - // Define a buffer of ones, for bias accumulation - // Note: this buffer can be shared with other modules, it only ever gets increased, - // and always contains ones. - if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) - { - // Resize plane and fill with ones... 
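/*
 * The ones buffer is the usual GEMM/GEMV trick for bias handling: in
 * updateOutput, multiplying bias (nOutputPlane x 1) against a
 * 1 x (oT*oH*oW) row of ones broadcasts the bias over every output
 * location, and in accGradParameters the transposed gemv against
 * gradOutput sums the per-plane gradient into gradBias.
 */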
- THTensor_(resize3d)(ones, outputDepth, outputHeight, outputWidth); - THTensor_(fill)(ones, 1); - } - - // Helpers - THTensor *input_n = THTensor_(new)(); - THTensor *output_n = THTensor_(new)(); - - int elt; - // For each elt in batch, do: - for (elt = 0; elt < batchSize; ++elt) - { - // Matrix mulitply per output: - THTensor_(select)(input_n, input, 0, elt); - THTensor_(select)(output_n, output, 0, elt); - - // M,N,K are dims of matrix A and B - // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) - const long m = weight->size[1] * weight->size[2] * weight->size[3] * weight->size[4]; - const long n = columns->size[1]; - const long k = weight->size[0]; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 'n', 't', - n, m, k, - 1, - THTensor_(data)(input_n), n, - THTensor_(data)(weight), m, - 0, - THTensor_(data)(columns), n - ); - - // Unpack columns back into input: - THNN_(col2vol)( - THTensor_(data)(columns), - nOutputPlane, outputDepth, outputHeight, outputWidth, - kT, kH, kW, - pT, pH, pW, - dT, dH, dW, - 1, 1, 1, - THTensor_(data)(output_n) - ); - - // Do Bias after: - // M,N,K are dims of matrix A and B - // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) - const long m_ = nOutputPlane; - const long n_ = outputDepth * outputHeight * outputWidth; - const long k_ = 1; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - if (bias) { - THBlas_(gemm)( - 't', 'n', - n_, m_, k_, - 1, - THTensor_(data)(ones), k_, - THTensor_(data)(bias), k_, - 1, - THTensor_(data)(output_n), n_ - ); - } - } - - // Free - THTensor_(free)(input_n); - THTensor_(free)(output_n); - - // Resize output - if (batch == 0) - { - THTensor_(resize4d)(output, nOutputPlane, outputDepth, outputHeight, outputWidth); - THTensor_(resize4d)(input, nInputPlane, inputDepth, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(weight); - if (bias) THTensor_(free)(bias); -} - -void THNN_(VolumetricFullConvolution_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THTensor *weight, - THTensor *finput, - THTensor *fgradInput, // only used by cuda impl - int dT, int dW, int dH, // stride - int pT, int pW, int pH, // padding - int aT, int aW, int aH) // extra output adjustment -{ - THTensor *gradColumns = finput; - - // number of input & output planes and kernel size is indirectly defined by the weight tensor - THNN_(VolumetricFullConvolution_shapeCheck)( - input, gradOutput, weight, NULL, - dT, dW, dH, pT, pW, pH, aT, aW, aH); - - const int nInputPlane = (int)weight->size[0]; - const int nOutputPlane = (int)weight->size[1]; - const int kT = (int)weight->size[2]; - const int kH = (int)weight->size[3]; - const int kW = (int)weight->size[4]; - - input = THTensor_(newContiguous)(input); - weight = THTensor_(newContiguous)(weight); - gradOutput = THTensor_(newContiguous)(gradOutput); - - int batch = 1; - if (input->nDimension == 4) - { - // Force batch - batch = 0; - THTensor_(resize5d)(input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); - THTensor_(resize5d)(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); - } - - const long inputWidth = input->size[4]; - const long inputHeight = input->size[3]; - const long inputDepth = input->size[2]; - const long outputWidth = (inputWidth - 1) * dW - 2*pW + kW + aW; - const long outputHeight = (inputHeight - 1) * dH - 2*pH + kH + 
aH; - const long outputDepth = (inputDepth - 1) * dT - 2*pT + kT + aT; - - // Batch size + input planes - const long batchSize = input->size[0]; - - // Resize output - THTensor_(resize5d)(gradInput, batchSize, nInputPlane, inputDepth, inputHeight, inputWidth); - THTensor_(zero)(gradInput); - - // Resize temporary columns - THTensor_(resize2d)(gradColumns, nOutputPlane*kW*kH*kT, inputDepth*inputHeight*inputWidth); - - // Helpers - THTensor *gradInput_n = THTensor_(new)(); - THTensor *gradOutput_n = THTensor_(new)(); - - int elt; - // For each elt in batch, do: - for (elt = 0; elt < batchSize; ++elt) - { - // Matrix mulitply per sample: - THTensor_(select)(gradInput_n, gradInput, 0, elt); - THTensor_(select)(gradOutput_n, gradOutput, 0, elt); - - // Extract columns: - THNN_(vol2col)( - THTensor_(data)(gradOutput_n), - nOutputPlane, outputDepth, outputHeight, outputWidth, - kT, kH, kW, - pT, pH, pW, - dT, dH, dW, - 1, 1, 1, - THTensor_(data)(gradColumns) - ); - - // M,N,K are dims of matrix A and B - // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) - const long m = weight->size[0]; - const long n = gradColumns->size[1]; - const long k = weight->size[1] * weight->size[2] * weight->size[3] * weight->size[4]; - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 'n', 'n', - n, m, k, - 1, - THTensor_(data)(gradColumns), n, - THTensor_(data)(weight), k, - 0, - THTensor_(data)(gradInput_n), n - ); - } - - // Free - THTensor_(free)(gradInput_n); - THTensor_(free)(gradOutput_n); - - // Resize output - if (batch == 0) - { - THTensor_(resize4d)(gradOutput, nOutputPlane, outputDepth, outputHeight, outputWidth); - THTensor_(resize4d)(input, nInputPlane, inputDepth, inputHeight, inputWidth); - THTensor_(resize4d)(gradInput, nInputPlane, inputDepth, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); - THTensor_(free)(weight); -} - -void THNN_(VolumetricFullConvolution_accGradParameters)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradWeight, - THTensor *gradBias, - THTensor *finput, - THTensor *fgradInput, - int dT, int dW, int dH, // stride - int pT, int pW, int pH, // padding - int aT, int aW, int aH, // extra output adjustment - accreal scale_) -{ - real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_); - // number of input & output planes and kernel size is indirectly defined by the gradWeight tensor - THNN_(VolumetricFullConvolution_shapeCheck)( - input, gradOutput, gradWeight, gradBias, - dT, dW, dH, pT, pW, pH, aT, aW, aH); - - int nInputPlane = (int)gradWeight->size[0]; - int nOutputPlane = (int)gradWeight->size[1]; - int kT = (int)gradWeight->size[2]; - int kH = (int)gradWeight->size[3]; - int kW = (int)gradWeight->size[4]; - - THTensor *columns = finput; - THTensor *ones = fgradInput; - - input = THTensor_(newContiguous)(input); - gradOutput = THTensor_(newContiguous)(gradOutput); - THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous"); - if (gradBias) - THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous"); - - int batch = 1; - if (input->nDimension == 4) - { - // Force batch - batch = 0; - THTensor_(resize5d)(input, 1, input->size[0], input->size[1], input->size[2], input->size[3]); - THTensor_(resize5d)(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2], gradOutput->size[3]); - } - - const long inputWidth = input->size[4]; - const long inputHeight = input->size[3]; - const 
long inputDepth = input->size[2]; - const long outputWidth = (inputWidth - 1) * dW - 2*pW + kW + aW; - const long outputHeight = (inputHeight - 1) * dH - 2*pH + kH + aH; - const long outputDepth = (inputDepth - 1) * dT - 2*pT + kT + aT; - - // Batch size + input planes - const long batchSize = input->size[0]; - - // Define a buffer of ones, for bias accumulation - if (ones->nDimension != 3 || ones->size[0]*ones->size[1]*ones->size[2] < outputDepth*outputHeight*outputWidth) - { - // Resize plane and fill with ones... - THTensor_(resize3d)(ones, outputDepth, outputHeight, outputWidth); - THTensor_(fill)(ones, 1); - } - - // Resize temporary columns - THTensor_(resize2d)(columns, nOutputPlane*kW*kH*kT, inputDepth*inputHeight*inputWidth); - - // Helpers - THTensor *input_n = THTensor_(new)(); - THTensor *gradOutput_n = THTensor_(new)(); - - int elt; - // For each elt in batch, do: - for (elt = 0; elt < batchSize; ++elt) - { - // Matrix mulitply per output: - THTensor_(select)(input_n, input, 0, elt); - THTensor_(select)(gradOutput_n, gradOutput, 0, elt); - - // Extract columns: - THNN_(vol2col)( - THTensor_(data)(gradOutput_n), nOutputPlane, - outputDepth, outputHeight, outputWidth, - kT, kH, kW, - pT, pH, pW, - dT, dH, dW, - 1, 1, 1, - THTensor_(data)(columns) - ); - - // M,N,K are dims of matrix A and B - // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) - const long n = columns->size[0]; // nOutputPlane * kt * kh * kw - const long m = input_n->size[0]; // nInputPlane - const long k = columns->size[1]; // inputHeight * inputWidth - - // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) - THBlas_(gemm)( - 't', 'n', - n, m, k, - scale, - THTensor_(data)(columns), k, - THTensor_(data)(input_n), k, - 1, - THTensor_(data)(gradWeight), n - ); - - // Do Bias: - // M,N,K are dims of matrix A and B - // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) - const long m_ = nOutputPlane; - const long k_ = outputDepth * outputHeight * outputWidth; - - // Do GEMV (note: this is a bit confusing because gemv assumes column-major matrices) - if (gradBias) { - THBlas_(gemv)( - 't', - k_, m_, - scale, - THTensor_(data)(gradOutput_n), k_, - THTensor_(data)(ones), 1, - 1, - THTensor_(data)(gradBias), 1 - ); - } - } - - // Free - THTensor_(free)(input_n); - THTensor_(free)(gradOutput_n); - - // Resize - if (batch == 0) - { - THTensor_(resize4d)(gradOutput, nOutputPlane, outputDepth, outputHeight, outputWidth); - THTensor_(resize4d)(input, nInputPlane, inputDepth, inputHeight, inputWidth); - } - - THTensor_(free)(input); - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricMaxPooling.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricMaxPooling.c deleted file mode 100644 index a3601e0b62..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricMaxPooling.c +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricMaxPooling.c" -#else - -void THNN_(VolumetricMaxPooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH, - bool ceilMode) -{ - THNN_(VolumetricDilatedMaxPooling_updateOutput)( - state, input, output, indices, - kT, kW, kH, dT, dW, dH, - pT, pW, pH, 1, 1, 1, ceilMode); -} - -void THNN_(VolumetricMaxPooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, 
- THIndexTensor *indices, - int kT, - int kW, - int kH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH, - bool ceilMode) -{ - THNN_(VolumetricDilatedMaxPooling_updateGradInput)( - state, input, gradOutput, gradInput, indices, - kT, kW, kH, dT, dW, dH, - pT, pW, pH, 1, 1, 1, ceilMode); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricMaxUnpooling.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricMaxUnpooling.c deleted file mode 100644 index d9d9e59510..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricMaxUnpooling.c +++ /dev/null @@ -1,373 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricMaxUnpooling.c" -#else - -static inline void THNN_(VolumetricMaxUnpooling_shapeCheck)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THIndexTensor *indices, - int oT, - int oW, - int oH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH) -{ - THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input, - "4D or 5D (batch mode) tensor expected for input, but got: %s"); - - THNN_CHECK_SHAPE_INDICES(input, indices); - - THArgCheck(dT > 0 && dW > 0 && dH > 0, 10, - "stride should be greater than zero, but got dT: %d dH: %d dW: %d", - dT, dH, dW); - - int dimw = 3; - int dimh = 2; - int dimt = 1; - int dimn = 0; - - if (input->nDimension == 5) - { - dimt++; - dimw++; - dimh++; - dimn++; - } - int nslices = input->size[dimn]; - - if (gradOutput != NULL) { - if (oT != gradOutput->size[dimt] || oW != gradOutput->size[dimw] || oH != gradOutput->size[dimh]) - { - THError( - "Inconsistent gradOutput size. oT= %d, oH= %d, oW= %d, gradOutput: %dx%dx%d", - oT, oH, oW, gradOutput->size[dimt], gradOutput->size[dimh], gradOutput->size[dimw] - ); - } - - THNN_CHECK_DIM_SIZE(gradOutput, input->nDimension, dimn, nslices); - } -} - -static void THNN_(VolumetricMaxUnpooling_updateOutput_frame)( - real *input_p, - real *output_p, - THIndex_t *ind_p, - int nslices, - int iT, - int iW, - int iH, - int oT, - int oW, - int oH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH) -{ - int k; - int has_error = 0; - THIndex_t error_index; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - int ti, i, j, maxz, maxy, maxx; - for (ti = 0; ti < iT; ti++) - { - for (i = 0; i < iH; i++) - { - for (j = 0; j < iW; j++) - { - int start_t = ti * dT - pT; - int start_h = i * dH - pH; - int start_w = j * dW - pW; - - real *input_p_k = input_p + k*iT*iW*iH + ti*iW*iH + i*iW + j; - THIndex_t *ind_p_k = ind_p + k*iT*iW*iH + ti*iW*iH + i*iW + j; - - maxz = ((unsigned char*)(ind_p_k))[0]; /* retrieve position of max */ - maxy = ((unsigned char*)(ind_p_k))[1]; - maxx = ((unsigned char*)(ind_p_k))[2]; - - THIndex_t idx = k*oT*oW*oH + oH*oW*(start_t+maxz) + oW*(start_h+maxy) + (start_w+maxx); - if (start_t+maxz<0 || start_h+maxy<0 || start_w+maxx<0 || start_t+maxz>=oT - || start_h+maxy>=oH || start_w+maxx>=oW) - { -#pragma omp critical - { - has_error = 1; - error_index = idx; - } - } else { - output_p[idx] = *input_p_k; /* update output */ - } - } - } - } - } - if (has_error) { - THError( - "found an invalid max index %ld (output volumes are of size %dx%dx%d)", - error_index, oT, oH, oW - ); - } -} - -void THNN_(VolumetricMaxUnpooling_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - THIndexTensor *indices, - int oT, - int oW, - int oH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH) -{ - int dimw = 3; - int dimh = 2; - int dimt = 1; - int nbatch = 1; - int nslices; - 
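/*
 * Note that the indices consumed by the unpooling kernels here are not
 * flat offsets: the max-pooling forward pass packed the kernel-relative
 * (z, y, x) of each maximum into the first three bytes of a THIndex_t,
 * and updateOutput_frame above unpacks them with the matching
 * (unsigned char *) casts before rebuilding the absolute position from
 * the window origin (ti*dT - pT, i*dH - pH, j*dW - pW).
 */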
int iT; - int iH; - int iW; - real *input_data; - real *output_data; - THIndex_t *indices_data; - - THNN_(VolumetricMaxUnpooling_shapeCheck)( - state, input, NULL, indices, - oT, oW, oH, dT, dW, dH, pT, pW, pH); - - if (input->nDimension == 5) - { - nbatch = input->size[0]; - dimt++; - dimw++; - dimh++; - } - - /* sizes */ - nslices = input->size[dimt-1]; - iT = input->size[dimt]; - iH = input->size[dimh]; - iW = input->size[dimw]; - - /* get contiguous input */ - input = THTensor_(newContiguous)(input); - indices = THIndexTensor_(newContiguous)(indices); - - /* resize output */ - if (input->nDimension == 4) - { - THTensor_(resize4d)(output, nslices, oT, oH, oW); - THTensor_(zero)(output); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - - THNN_(VolumetricMaxUnpooling_updateOutput_frame)( - input_data, output_data, - indices_data, - nslices, - iT, iW, iH, - oT, oW, oH, - dT, dW, dH, pT, pW, pH - ); - } - else - { - int p; - - THTensor_(resize5d)(output, nbatch, nslices, oT, oH, oW); - THTensor_(zero)(output); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - indices_data = THIndexTensor_(data)(indices); - - for (p = 0; p < nbatch; p++) - { - THNN_(VolumetricMaxUnpooling_updateOutput_frame)( - input_data+p*nslices*iT*iW*iH, - output_data+p*nslices*oT*oW*oH, - indices_data+p*nslices*iT*iW*iH, - nslices, - iT, iW, iH, - oT, oW, oH, - dT, dW, dH, - pT, pW, pH - ); - } - } - - /* cleanup */ - THTensor_(free)(input); - THIndexTensor_(free)(indices); -} - -static void THNN_(VolumetricMaxUnpooling_updateGradInput_frame)( - real *gradInput_p, - real *gradOutput_p, - THIndex_t *ind_p, - int nslices, - int iT, - int iW, - int iH, - int oT, - int oW, - int oH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH) -{ - int k; -#pragma omp parallel for private(k) - for (k = 0; k < nslices; k++) - { - int ti, i, j, maxz, maxy, maxx; - for (ti = 0; ti < iT; ti++) - { - for (i = 0; i < iH; i++) - { - for (j = 0; j < iW; j++) - { - int start_t = ti * dT - pT; - int start_h = i * dH - pH; - int start_w = j * dW - pW; - - real *gradInput_p_k = gradInput_p + k*iT*iW*iH + ti*iW*iH + i*iW + j; - THIndex_t *ind_p_k = ind_p + k*iT*iW*iH + ti*iW*iH + i*iW + j; - - maxz = ((unsigned char*)(ind_p_k))[0]; /* retrieve position of max */ - maxy = ((unsigned char*)(ind_p_k))[1]; - maxx = ((unsigned char*)(ind_p_k))[2]; - - if (start_t+maxz<0 || start_h+maxy<0 || start_w+maxx<0 - || start_t+maxz>=oT || start_h+maxy>=oH || start_w+maxx>=oW) - { - THError( - "invalid max index z= %d, y= %d, x= %d, oT= %d, oW= %d, oH= %d", - start_t+maxz, start_h+maxy, start_w+maxx, oT, oW, oH - ); - } - *gradInput_p_k = gradOutput_p[k*oT*oW*oH + oH*oW*(start_t+maxz) - + oW*(start_h+maxy) + (start_w+maxx)]; /* update gradient */ - } - } - } - } -} - -void THNN_(VolumetricMaxUnpooling_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - THIndexTensor *indices, - int oT, - int oW, - int oH, - int dT, - int dW, - int dH, - int pT, - int pW, - int pH) -{ - int dimw = 3; - int dimh = 2; - int dimt = 1; - int nbatch = 1; - int nslices; - int iT; - int iH; - int iW; - real *gradInput_data; - real *gradOutput_data; - THIndex_t *indices_data; - - THNN_(VolumetricMaxUnpooling_shapeCheck)( - state, input, gradOutput, indices, - oT, oW, oH, dT, dW, dH, pT, pW, pH); - - // TODO: check gradOutput shape - /* get contiguous gradOutput */ - gradOutput = 
THTensor_(newContiguous)(gradOutput); - indices = THIndexTensor_(newContiguous)(indices); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - if (input->nDimension == 5) - { - nbatch = input->size[0]; - dimt++; - dimw++; - dimh++; - } - - /* sizes */ - nslices = input->size[dimt-1]; - iT = input->size[dimt]; - iH = input->size[dimh]; - iW = input->size[dimw]; - - /* get raw pointers */ - gradInput_data = THTensor_(data)(gradInput); - gradOutput_data = THTensor_(data)(gradOutput); - indices_data = THIndexTensor_(data)(indices); - - /* backprop */ - if (input->nDimension == 4) - { - THNN_(VolumetricMaxUnpooling_updateGradInput_frame)( - gradInput_data, gradOutput_data, - indices_data, - nslices, - iT, iW, iH, - oT, oW, oH, - dT, dW, dH, - pT, pW, pH - ); - } - else - { - int p; - for (p = 0; p < nbatch; p++) - { - THNN_(VolumetricMaxUnpooling_updateGradInput_frame)( - gradInput_data+p*nslices*iT*iW*iH, - gradOutput_data+p*nslices*oT*oW*oH, - indices_data+p*nslices*iT*iW*iH, - nslices, - iT, iW, iH, - oT, oW, oH, - dT, dW, dH, - pT, pW, pH - ); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); - THIndexTensor_(free)(indices); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricReplicationPadding.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricReplicationPadding.c deleted file mode 100644 index 4d8993ec20..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricReplicationPadding.c +++ /dev/null @@ -1,357 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricReplicationPadding.c" -#else - -static inline void THNN_(VolumetricReplicationPadding_shapeCheck)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - int pleft, int pright, - int ptop, int pbottom, - int pfront, int pback) { - int dimw = 3; - int dimh = 2; - int dimd = 1; - int dimslices = 0; - long nslices; - long idepth; - long iheight; - long iwidth; - long odepth; - long oheight; - long owidth; - - THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input, - "4D or 5D (batch mode) tensor expected for input, but got: %s"); - - if (input->nDimension == 5) - { - dimw++; - dimh++; - dimd++; - dimslices++; - } - - /* sizes */ - nslices = input->size[dimslices]; - idepth = input->size[dimd]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - odepth = idepth + pfront + pback; - oheight = iheight + ptop + pbottom; - owidth = iwidth + pleft + pright; - - THArgCheck(owidth >= 1 || oheight >= 1 || odepth >= 1, 2, - "input (D: %d H: %d, W: %d)is too small." - " Calculated output D: %d H: %d W: %d", - idepth, iheight, iwidth, odepth, oheight, owidth); - - if (gradOutput != NULL) { - THArgCheck(nslices == THTensor_(size)(gradOutput, dimslices), 3, - "gradOutput width unexpected. Expected: %d, Got: %d", - nslices, THTensor_(size)(gradOutput, dimslices)); - THArgCheck(owidth == THTensor_(size)(gradOutput, dimw), 3, - "gradOutput width unexpected. Expected: %d, Got: %d", - owidth, THTensor_(size)(gradOutput, dimw)); - THArgCheck(oheight == THTensor_(size)(gradOutput, dimh), 3, - "gradOutput height unexpected. Expected: %d, Got: %d", - oheight, THTensor_(size)(gradOutput, dimh)); - THArgCheck(odepth == THTensor_(size)(gradOutput, dimd), 3, - "gradOutput depth unexpected. 
Expected: %d, Got: %d", - odepth, THTensor_(size)(gradOutput, dimd)); - } -} - -static void THNN_(VolumetricReplicationPadding_updateOutput_frame)( - real *input_p, real *output_p, - long nslices, - long iwidth, long iheight, long idepth, - long owidth, long oheight, long odepth, - int pleft, int pright, - int ptop, int pbottom, - int pfront, int pback) -{ - int iStartX = fmax(0, -pleft); - int iStartY = fmax(0, -ptop); - int iStartZ = fmax(0, -pfront); - int oStartX = fmax(0, pleft); - int oStartY = fmax(0, ptop); - int oStartZ = fmax(0, pfront); - - long k, ip_x, ip_y, ip_z; -#pragma omp parallel for private(k, ip_x, ip_y, ip_z) - for (k = 0; k < nslices; k++) { - long i, j, z; - for (z = 0; z < odepth; z++) { - for (i = 0; i < oheight; i++) { - for (j = 0; j < owidth; j++) { - if (j < pleft) { - ip_x = pleft; - } else if (j >= pleft && j < iwidth + pleft) { - ip_x = j; - } else { - ip_x = iwidth + pleft - 1; - } - ip_x = ip_x - oStartX + iStartX; - - if (i < ptop) { - ip_y = ptop; - } else if (i >= ptop && i < iheight + ptop) { - ip_y = i; - } else { - ip_y = iheight + ptop - 1; - } - ip_y = ip_y - oStartY + iStartY; - - if (z < pfront) { - ip_z = pfront; - } else if (z >= pfront && z < idepth + pfront) { - ip_z = z; - } else { - ip_z = idepth + pfront - 1; - } - ip_z = ip_z - oStartZ + iStartZ; - - real *dest_p = output_p + k * owidth * oheight * odepth + - z * owidth * oheight + i * owidth + j; - real *src_p = input_p + k * iwidth * iheight * idepth + - ip_z * iwidth * iheight + ip_y * iwidth + ip_x; - *dest_p = *src_p; - } - } - } - } -} - -void THNN_(VolumetricReplicationPadding_updateOutput)(THNNState *state, - THTensor *input, - THTensor *output, - int pleft, int pright, - int ptop, int pbottom, - int pfront, int pback) -{ - int dimw = 3; - int dimh = 2; - int dimd = 1; - int dimslices = 0; - long nbatch = 1; - long nslices; - long idepth; - long iheight; - long iwidth; - long odepth; - long oheight; - long owidth; - real *input_data; - real *output_data; - -THNN_(VolumetricReplicationPadding_shapeCheck)( - state, input, NULL, pleft, pright, - ptop, pbottom, pfront, pback); - - if (input->nDimension == 5) - { - nbatch = input->size[0]; - dimw++; - dimh++; - dimd++; - dimslices++; - } - - /* sizes */ - nslices = input->size[dimslices]; - idepth = input->size[dimd]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - odepth = idepth + pfront + pback; - oheight = iheight + ptop + pbottom; - owidth = iwidth + pleft + pright; - - /* get contiguous input */ - input = THTensor_(newContiguous)(input); - - /* resize output */ - if (input->nDimension == 4) - { - THTensor_(resize4d)(output, nslices, odepth, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - - THNN_(VolumetricReplicationPadding_updateOutput_frame)( - input_data, output_data, nslices, iwidth, iheight, idepth, - owidth, oheight, odepth, pleft, pright, ptop, pbottom, pfront, - pback); - } - else - { - long p; - - THTensor_(resize5d)(output, nbatch, nslices, odepth, oheight, owidth); - - input_data = THTensor_(data)(input); - output_data = THTensor_(data)(output); - -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) - { - THNN_(VolumetricReplicationPadding_updateOutput_frame)( - input_data + p * nslices * iwidth * iheight * idepth, - output_data + p * nslices * owidth * oheight * odepth, - nslices, - iwidth, iheight, idepth, - owidth, oheight, odepth, - pleft, pright, - ptop, pbottom, - pfront, pback); - } - } - - /* cleanup */ - 
THTensor_(free)(input); -} - -static void THNN_(VolumetricReplicationPadding_updateGradInput_frame)( - real *ginput_p, real *goutput_p, - long nslices, - long iwidth, long iheight, long idepth, - long owidth, long oheight, long odepth, - int pleft, int pright, - int ptop, int pbottom, - int pfront, int pback) -{ - int iStartX = fmax(0, -pleft); - int iStartY = fmax(0, -ptop); - int iStartZ = fmax(0, -pfront); - int oStartX = fmax(0, pleft); - int oStartY = fmax(0, ptop); - int oStartZ = fmax(0, pfront); - - long k, ip_x, ip_y, ip_z; -#pragma omp parallel for private(k, ip_x, ip_y, ip_z) - for (k = 0; k < nslices; k++) { - long i, j, z; - for (z = 0; z < odepth; z++) { - for (i = 0; i < oheight; i++) { - for (j = 0; j < owidth; j++) { - if (j < pleft) { - ip_x = pleft; - } else if (j >= pleft && j < iwidth + pleft) { - ip_x = j; - } else { - ip_x = iwidth + pleft - 1; - } - ip_x = ip_x - oStartX + iStartX; - - if (i < ptop) { - ip_y = ptop; - } else if (i >= ptop && i < iheight + ptop) { - ip_y = i; - } else { - ip_y = iheight + ptop - 1; - } - ip_y = ip_y - oStartY + iStartY; - - if (z < pfront) { - ip_z = pfront; - } else if (z >= pfront && z < idepth + pfront) { - ip_z = z; - } else { - ip_z = idepth + pfront - 1; - } - ip_z = ip_z - oStartZ + iStartZ; - - real *src_p = goutput_p + k * owidth * oheight * odepth + - z * owidth * oheight + i * owidth + j; - real *dest_p = ginput_p + k * iwidth * iheight * idepth + - ip_z * iwidth * iheight + ip_y * iwidth + ip_x; - *dest_p += *src_p; - } - } - } - } -} - -void THNN_(VolumetricReplicationPadding_updateGradInput)(THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int pleft, int pright, - int ptop, int pbottom, - int pfront, int pback) -{ - int dimw = 3; - int dimh = 2; - int dimd = 1; - int dimslices = 0; - long nbatch = 1; - long nslices; - long idepth; - long iheight; - long iwidth; - long odepth; - long oheight; - long owidth; - - if (input->nDimension == 5) - { - nbatch = input->size[0]; - dimw++; - dimh++; - dimd++; - dimslices++; - } - - /* sizes */ - nslices = input->size[dimslices]; - idepth = input->size[dimd]; - iheight = input->size[dimh]; - iwidth = input->size[dimw]; - odepth = idepth + pfront + pback; - oheight = iheight + ptop + pbottom; - owidth = iwidth + pleft + pright; - - -THNN_(VolumetricReplicationPadding_shapeCheck)( - state, input, NULL, pleft, pright, - ptop, pbottom, pfront, pback); - - /* get contiguous gradOutput */ - gradOutput = THTensor_(newContiguous)(gradOutput); - - /* resize */ - THTensor_(resizeAs)(gradInput, input); - THTensor_(zero)(gradInput); - - /* backprop */ - if (input->nDimension == 4) { - THNN_(VolumetricReplicationPadding_updateGradInput_frame)( - THTensor_(data)(gradInput), - THTensor_(data)(gradOutput), - nslices, - iwidth, iheight, idepth, - owidth, oheight, odepth, - pleft, pright, - ptop, pbottom, - pfront, pback); - } else { - long p; -#pragma omp parallel for private(p) - for (p = 0; p < nbatch; p++) { - THNN_(VolumetricReplicationPadding_updateGradInput_frame)( - THTensor_(data)(gradInput) + p * nslices * idepth * iheight * iwidth, - THTensor_(data)(gradOutput) + p * nslices * odepth * oheight * owidth, - nslices, - iwidth, iheight, idepth, - owidth, oheight, odepth, - pleft, pright, - ptop, pbottom, - pfront, pback); - } - } - - /* cleanup */ - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricUpSamplingNearest.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricUpSamplingNearest.c deleted file mode 
100644 index 9068fb58d2..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricUpSamplingNearest.c +++ /dev/null @@ -1,226 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricUpSamplingNearest.c" -#else - - -static inline void THNN_(VolumetricUpSamplingNearest_shapeCheck) - (THTensor *input, THTensor *gradOutput, - int scale_factor) { - THArgCheck(input != NULL, 2, "5D input tensor expected but got NULL"); - THArgCheck(scale_factor > 1, 4, - "scale_factor must be greater than 1, but got: %d", scale_factor); - THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input, - "4D or 5D input tensor expected but got: %s"); - if (input->nDimension == 4) { - int nChannels = THTensor_(size)(input, 0); - int inputDepth = THTensor_(size)(input, 1); - int inputHeight = THTensor_(size)(input, 2); - int inputWidth = THTensor_(size)(input, 3); - int outputDepth = inputDepth * scale_factor; - int outputHeight = inputHeight * scale_factor; - int outputWidth = inputWidth * scale_factor; - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, 4, 0, nChannels); - THNN_CHECK_DIM_SIZE(gradOutput, 4, 1, outputDepth); - THNN_CHECK_DIM_SIZE(gradOutput, 4, 2, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, 4, 3, outputWidth); - } - } else { - int nBatch = THTensor_(size)(input, 0); - int nChannels = THTensor_(size)(input, 1); - int inputDepth = THTensor_(size)(input, 2); - int inputHeight = THTensor_(size)(input, 3); - int inputWidth = THTensor_(size)(input, 4); - int outputDepth = inputDepth * scale_factor; - int outputHeight = inputHeight * scale_factor; - int outputWidth = inputWidth * scale_factor; - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, 5, 0, nBatch); - THNN_CHECK_DIM_SIZE(gradOutput, 5, 1, nChannels); - THNN_CHECK_DIM_SIZE(gradOutput, 5, 2, outputDepth); - THNN_CHECK_DIM_SIZE(gradOutput, 5, 3, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, 5, 4, outputWidth); - } - } -} - -void THNN_(VolumetricUpSamplingNearest_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int scale_factor) -{ - THNN_(VolumetricUpSamplingNearest_shapeCheck)(input, NULL, scale_factor); - int inputDepth = THTensor_(size)(input, input->nDimension-3); - int inputHeight = THTensor_(size)(input, input->nDimension-2); - int inputWidth = THTensor_(size)(input, input->nDimension-1); - int outputDepth = inputDepth * scale_factor; - int outputHeight = inputHeight * scale_factor; - int outputWidth = inputWidth * scale_factor; - - if (input->nDimension == 4) { - THTensor_(resize4d)(output, - THTensor_(size)(input, 0), - outputDepth, outputHeight, outputWidth); - } else { - THTensor_(resize5d)(output, - THTensor_(size)(input, 0), - THTensor_(size)(input, 1), - outputDepth, outputHeight, outputWidth); - } - - int dT = scale_factor; - int dW = scale_factor; - int dH = scale_factor; - int xDim = input->nDimension-3; - int yDim = input->nDimension-2; - int zDim = input->nDimension-1; - - // dims - int idim = input->nDimension; - int osz0 = output->size[0]; - int osz1 = output->size[1]; - int osz2 = output->size[2]; - int osz3 = output->size[3]; - int osz4 = 1; - if (idim > 4) { - osz4 = output->size[4]; - } - - // get strides - long *is = input->stride; - long *os = output->stride; - - // get raw pointers - real *pin = THTensor_(data)(input); - real *pout = THTensor_(data)(output); - - // perform the upsampling - int i0, i1, i2, i3, i4, isrc, idst; - int iout[5]; // Output indices - int iin[5]; // Input indices - - for (i0 = 0; i0 < osz0; i0++) { - 
iout[0] = i0; - iin[0] = i0; - for (i1 = 0; i1 < osz1; i1++) { - iout[1] = i1; - iin[1] = i1; - for (i2 = 0; i2 < osz2; i2++) { - iout[2] = i2; - iin[2] = i2; - for (i3 = 0; i3 < osz3; i3++) { - iout[3] = i3; - iin[3] = i3; - for (i4 = 0; i4 < osz4; i4++) { - iout[4] = i4; - iin[4] = i4; - - // set the indices for the upsampled dimensions - iin[xDim] = iout[xDim] / dW; - iin[yDim] = iout[yDim] / dH; - iin[zDim] = iout[zDim] / dT; - - idst = i0*os[0] + i1*os[1] + i2*os[2] + i3*os[3]; - isrc = iin[0]*is[0] + iin[1]*is[1] + iin[2]*is[2] + iin[3]*is[3]; - if (idim > 4) { - idst += i4*os[4]; - isrc += iin[4]*is[4]; - } - - pout[idst] = pin[isrc]; - } - } - } - } - } -} - -void THNN_(VolumetricUpSamplingNearest_updateGradInput)( - THNNState *state, - THTensor *input, - THTensor *gradOutput, - THTensor *gradInput, - int scale_factor) -{ - THNN_(VolumetricUpSamplingNearest_shapeCheck)(input, gradOutput, scale_factor); - THTensor_(resizeAs)(gradInput, input); - - int dW = scale_factor; - int dH = scale_factor; - int dT = scale_factor; - int xDim = gradInput->nDimension-3; - int yDim = gradInput->nDimension-2; - int zDim = gradInput->nDimension-1; - - // dims - int idim = gradInput->nDimension; // Guaranteed to be between 3 and 5 - int isz0 = gradInput->size[0]; - int isz1 = gradInput->size[1]; - int isz2 = gradInput->size[2]; - int isz3 = gradInput->size[3]; - int isz4 = 1; - if (idim > 4) { - isz4 = gradInput->size[4]; - } - - // get strides - long *is = gradInput->stride; - long *os = gradOutput->stride; - - // get raw pointers - real *pin = THTensor_(data)(gradInput); - real *pout = THTensor_(data)(gradOutput); - - // perform the upsampling - int i0, i1, i2, i3, i4, isrc, idst, x, y, z; - int iin[5]; // Input indices - int iout[5]; // Output indices - - THTensor_(zero)(gradInput); - - for (i0 = 0; i0 < isz0; i0++) { - iin[0] = i0; - iout[0] = i0; - for (i1 = 0; i1 < isz1; i1++) { - iin[1] = i1; - iout[1] = i1; - for (i2 = 0; i2 < isz2; i2++) { - iin[2] = i2; - iout[2] = i2; - for (i3 = 0; i3 < isz3; i3++) { - iin[3] = i3; - iout[3] = i3; - - for (i4 = 0; i4 < isz4; i4++) { - iin[4] = i4; - iout[4] = i4; - - idst = i0*is[0] + i1*is[1] + i2*is[2] + i3*is[3]; - if (idim > 4) { - idst += i4*is[4]; - } - - // Now accumulate the gradients from gradOutput - for (z = 0; z < dT; z++) { - for (y = 0; y < dH; y++) { - for (x = 0; x < dW; x++) { - iout[xDim] = dW * iin[xDim] + x; - iout[yDim] = dH * iin[yDim] + y; - iout[zDim] = dT * iin[zDim] + z; - isrc = iout[0]*os[0] + iout[1]*os[1] + iout[2]*os[2] + iout[3]*os[3]; - if (idim > 4) { - isrc += iout[4]*os[4]; - } - pin[idst] += pout[isrc]; - } - } - } - } - } - } - } - } -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricUpSamplingTrilinear.c b/contrib/lua-torch/nn/lib/THNN/generic/VolumetricUpSamplingTrilinear.c deleted file mode 100644 index f2b04dba9f..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/VolumetricUpSamplingTrilinear.c +++ /dev/null @@ -1,213 +0,0 @@ -// Adapted from interp.cpp from Caffe util by Pauline Luc -// Originally developed by George Papandreou - -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/VolumetricUpSamplingTrilinear.c" -#else - -static inline void THNN_(VolumetricUpSamplingTrilinear_shapeCheck) - (THTensor *input, THTensor *gradOutput, - int nBatch, int nChannels, - int inputDepth, int inputHeight, int inputWidth, - int outputDepth, int outputHeight, int outputWidth) { - THArgCheck(inputDepth > 0 && inputHeight > 0 && inputWidth > 0 - && outputDepth > 0 && outputHeight > 0 && 
outputWidth > 0, 2, - "input and output sizes should be greater than 0," - " but got input (D: %d, H: %d, W: %d) output (D: %d, H: %d, W: %d)", - inputDepth, inputHeight, inputWidth, outputDepth, outputHeight, outputWidth); - if (input != NULL) { - THNN_ARGCHECK(input->nDimension == 5, 2, input, - "5D input tensor expected but got: %s"); - } - - if (gradOutput != NULL) { - THNN_CHECK_DIM_SIZE(gradOutput, 5, 0, nBatch); - THNN_CHECK_DIM_SIZE(gradOutput, 5, 1, nChannels); - THNN_CHECK_DIM_SIZE(gradOutput, 5, 2, outputDepth); - THNN_CHECK_DIM_SIZE(gradOutput, 5, 3, outputHeight); - THNN_CHECK_DIM_SIZE(gradOutput, 5, 4, outputWidth); - } -} - -void THNN_(VolumetricUpSamplingTrilinear_updateOutput)( - THNNState *state, - THTensor *input, - THTensor *output, - int outputDepth, - int outputHeight, - int outputWidth){ - - int nbatch = THTensor_(size)(input, 0); - int channels = THTensor_(size)(input, 1); - int inputDepth = THTensor_(size)(input, 2); - int inputHeight = THTensor_(size)(input, 3); - int inputWidth = THTensor_(size)(input, 4); - - THNN_(VolumetricUpSamplingTrilinear_shapeCheck) - (input, NULL, - nbatch, channels, - inputDepth, inputHeight, inputWidth, - outputDepth, outputHeight, outputWidth); - - input = THTensor_(newContiguous)(input); - THTensor_(resize5d)(output, - THTensor_(size)(input, 0), - THTensor_(size)(input, 1), - outputDepth, outputHeight, outputWidth); - THTensor_(zero)(output); - real *idata = THTensor_(data)(input); - real *odata = THTensor_(data)(output); - channels = nbatch * channels; - THAssert(inputDepth > 0 && inputHeight > 0 && inputWidth > 0 && - outputDepth > 0 && outputHeight > 0 && outputWidth > 0); - // special case: just copy - if (inputDepth == outputDepth && inputHeight == outputHeight && inputWidth == outputWidth) { - for (int t2 = 0; t2 < outputDepth; ++t2) { - const int t1 = t2; - for (int h2 = 0; h2 < outputHeight; ++h2) { - const int h1 = h2; - for (int w2 = 0; w2 < outputWidth; ++w2) { - const int w1 = w2; - const real* pos1 = &idata[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; - real* pos2 = &odata[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; - for (int c = 0; c < channels; ++c) { - pos2[0] = pos1[0]; - pos1 += inputWidth * inputHeight * inputDepth; - pos2 += outputWidth * outputHeight * outputDepth; - } - } - } - } - return; - } - const float rdepth = (outputDepth > 1) ? (float)(inputDepth - 1)/(outputDepth - 1) : 0.f; - const float rheight = (outputHeight > 1) ? (float)(inputHeight - 1)/(outputHeight - 1) : 0.f; - const float rwidth = (outputWidth > 1) ? (float)(inputWidth - 1) / (outputWidth - 1) : 0.f; - for (int t2 = 0; t2 < outputDepth; ++t2) { - const float t1r = rdepth * t2; - const int t1 = t1r; - const int t1p = (t1 < inputDepth - 1) ? 1 : 0; - const real t1lambda = t1r - t1; - const real t0lambda = (real)1. - t1lambda; - for (int h2 = 0; h2 < outputHeight; ++h2) { - const float h1r = rheight * h2; - const int h1 = h1r; - const int h1p = (h1 < inputHeight - 1) ? 1 : 0; - const real h1lambda = h1r - h1; - const real h0lambda = (real)1. - h1lambda; - for (int w2 = 0; w2 < outputWidth; ++w2) { - const float w1r = rwidth * w2; - const int w1 = w1r; - const int w1p = (w1 < inputWidth - 1) ? 1 : 0; - const real w1lambda = w1r - w1; - const real w0lambda = (real)1. 
- w1lambda; - const real* pos1 = &idata[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; - real* pos2 = &odata[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; - for (int c = 0; c < channels; ++c) { - pos2[0] = t0lambda * (h0lambda * (w0lambda * pos1[0] + w1lambda * pos1[w1p]) - + h1lambda * (w0lambda * pos1[h1p * inputWidth] - + w1lambda * pos1[h1p * inputWidth + w1p])) - + t1lambda * (h0lambda * (w0lambda * pos1[t1p * inputHeight * inputWidth] - + w1lambda * pos1[t1p * inputHeight * inputWidth - + w1p]) - + h1lambda * (w0lambda * pos1[t1p * inputHeight * inputWidth - + h1p * inputWidth] - + w1lambda * pos1[t1p * inputHeight * inputWidth - + h1p * inputWidth + w1p])); - pos1 += inputWidth * inputHeight * inputDepth; - pos2 += outputWidth * outputHeight * outputDepth; - } - } - } - } - THTensor_(free)(input); -} - -void THNN_(VolumetricUpSamplingTrilinear_updateGradInput)( - THNNState *state, - THTensor *gradOutput, - THTensor *gradInput, - int nbatch, - int channels, - int inputDepth, - int inputHeight, - int inputWidth, - int outputDepth, - int outputHeight, - int outputWidth){ - - THNN_(VolumetricUpSamplingTrilinear_shapeCheck) - (NULL, gradOutput, - nbatch, channels, - inputDepth, inputHeight, inputWidth, - outputDepth, outputHeight, outputWidth); - - THTensor_(resize5d)(gradInput, nbatch, channels, inputDepth, inputHeight, inputWidth); - THTensor_(zero)(gradInput); - gradOutput = THTensor_(newContiguous)(gradOutput); - real *data1 = THTensor_(data)(gradInput); - real *data2 = THTensor_(data)(gradOutput); - channels = nbatch * channels; - - // special case: same-size matching grids - if (inputDepth == outputDepth && inputHeight == outputHeight && inputWidth == outputWidth) { - for (int t2 = 0; t2 < outputDepth; ++t2) { - const int t1 = t2; - for (int h2 = 0; h2 < outputHeight; ++h2) { - const int h1 = h2; - for (int w2 = 0; w2 < outputWidth; ++w2) { - const int w1 = w2; - real* pos1 = &data1[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; - const real* pos2 = &data2[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; - for (int c = 0; c < channels; ++c) { - pos1[0] += pos2[0]; - pos1 += inputWidth * inputHeight * inputDepth; - pos2 += outputWidth * outputHeight * outputDepth; - } - } - } - } - return; - } - const float rdepth = (outputDepth > 1) ? (float)(inputDepth - 1)/(outputDepth - 1) : 0.f; - const float rheight = (outputHeight > 1) ? (float)(inputHeight - 1)/(outputHeight - 1) : 0.f; - const float rwidth = (outputWidth > 1) ? (float)(inputWidth - 1)/(outputWidth - 1) : 0.f; - for (int t2 = 0; t2 < outputDepth; ++t2) { - const float t1r = rdepth * t2; - const int t1 = t1r; - const int t1p = (t1 < inputDepth - 1) ? 1 : 0; - const real t1lambda = t1r - t1; - const real t0lambda = (real)1. - t1lambda; - for (int h2 = 0; h2 < outputHeight; ++h2) { - const float h1r = rheight * h2; - const int h1 = h1r; - const int h1p = (h1 < inputHeight - 1) ? 1 : 0; - const real h1lambda = h1r - h1; - const real h0lambda = (real)1. - h1lambda; - for (int w2 = 0; w2 < outputWidth; ++w2) { - const float w1r = rwidth * w2; - const int w1 = w1r; - const int w1p = (w1 < inputWidth - 1) ? 1 : 0; - const real w1lambda = w1r - w1; - const real w0lambda = (real)1. 
- w1lambda; - real* pos1 = &data1[t1 * inputHeight * inputWidth + h1 * inputWidth + w1]; - const real* pos2 = &data2[t2 * outputHeight * outputWidth + h2 * outputWidth + w2]; - for (int c = 0; c < channels; ++c) { - pos1[0] += t0lambda * h0lambda * w0lambda * pos2[0]; - pos1[w1p] += t0lambda * h0lambda * w1lambda * pos2[0]; - pos1[h1p * inputWidth] += t0lambda * h1lambda * w0lambda * pos2[0]; - pos1[h1p * inputWidth + w1p] += t0lambda * h1lambda * w1lambda * pos2[0]; - pos1[t1p * inputHeight * inputWidth] += t1lambda * h0lambda * w0lambda * pos2[0]; - pos1[t1p * inputHeight * inputWidth + w1p] += t1lambda * h0lambda * w1lambda * pos2[0]; - pos1[t1p * inputHeight * inputWidth + h1p * inputWidth] += t1lambda * h1lambda * w0lambda * pos2[0]; - pos1[t1p * inputHeight * inputWidth + h1p * inputWidth + w1p] += t1lambda * h1lambda * w1lambda * pos2[0]; - pos1 += inputWidth * inputHeight * inputDepth; - pos2 += outputWidth * outputHeight * outputDepth; - } - } - } - } - THTensor_(free)(gradOutput); -} - -#endif diff --git a/contrib/lua-torch/nn/lib/THNN/generic/unfold.c b/contrib/lua-torch/nn/lib/THNN/generic/unfold.c deleted file mode 100644 index 14a73b5672..0000000000 --- a/contrib/lua-torch/nn/lib/THNN/generic/unfold.c +++ /dev/null @@ -1,166 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/unfold.c" -#else - -/* note: due to write issues, this one cannot be parallelized as well as unfolded_copy */ -void THNN_(unfolded_acc)( - THTensor *finput, - THTensor *input, - int kW, - int kH, - int dW, - int dH, - int padW, - int padH, - int nInputPlane, - int inputWidth, - int inputHeight, - int outputWidth, - int outputHeight) -{ - // This function assumes that - // outputHeight*dH does not overflow a long - // outputWidth*dW does not overflow a long - - int nip; - - real *input_data = THTensor_(data)(input); - real *finput_data = THTensor_(data)(finput); - -#pragma omp parallel for private(nip) - for(nip = 0; nip < nInputPlane; nip++) - { - int kw, kh, y, x; - long ix, iy; - for(kh = 0; kh < kH; kh++) - { - for(kw = 0; kw < kW; kw++) - { - real *src = finput_data + nip*((size_t)kH*kW*outputHeight*outputWidth) + kh*((size_t)kW*outputHeight*outputWidth) + kw*((size_t)outputHeight*outputWidth); - real *dst = input_data + nip*((size_t)inputHeight*inputWidth); - if (padW > 0 || padH > 0) { - int lpad,rpad; - for(y = 0; y < outputHeight; y++) { - iy = (long)y*dH - padH + kh; - if (iy < 0 || iy >= inputHeight) { - } else { - if (dW==1){ - ix = 0 - padW + kw; - lpad = fmaxf(0,padW-kw); - rpad = fmaxf(0,padW-(kW-kw-1)); - real *dst_slice = dst+(size_t)iy*inputWidth+ix+lpad; - THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+lpad, 1, outputWidth - lpad - rpad); /* note: THVector_add could handle 1 value better */ - } - else{ - for (x=0; x<outputWidth; x++){ - ix = (long)x*dW - padW + kw; - if (ix < 0 || ix >= inputWidth){ - }else{ - real *dst_slice = dst+(size_t)iy*inputWidth+ix; - THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+x, 1, 1); - } - } - } - } - } - } else { - for(y = 0; y < outputHeight; y++) { - iy = (long)y*dH + kh; - ix = 0 + kw; - if (dW == 1 ) { - real *dst_slice = dst+(size_t)iy*inputWidth+ix; - THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth, 1, outputWidth); /* note: THVector_add could handle 1 value better */ - }else{ - for(x = 0; x < outputWidth; x++) { - real *dst_slice = dst+(size_t)iy*inputWidth+ix+x*dW; - THVector_(cadd)(dst_slice, dst_slice, src+(size_t)y*outputWidth+x, 1, 1); - } - } - } - } - } - } - } -} - -void THNN_(unfolded_copy)( - THTensor *finput, - THTensor *input, - int kW, - 
int kH, - int dW, - int dH, - int padW, - int padH, - int nInputPlane, - int inputWidth, - int inputHeight, - int outputWidth, - int outputHeight) -{ - // This function assumes that - // kH*kW does not overflow an int - // nInputPlane*kH*kW does not overflow a long - // outputHeight*dH does not overflow a long - // outputWidth*dW does not overflow a long - - long k; - real *input_data = THTensor_(data)(input); - real *finput_data = THTensor_(data)(finput); - -#pragma omp parallel for private(k) - for(k = 0; k < (long)nInputPlane*kH*kW; k++) { - long nip = k / (kH*kW); - long rest = k % (kH*kW); - long kh = rest / kW; - long kw = rest % kW; - int x, y; - long ix, iy; - real *dst = finput_data + nip*((size_t)kH*kW*outputHeight*outputWidth) + kh*((size_t)kW*outputHeight*outputWidth) + kw*((size_t)outputHeight*outputWidth); - real *src = input_data + nip*((size_t)inputHeight*inputWidth); - if (padW > 0 || padH > 0) { - long lpad,rpad; - for(y = 0; y < outputHeight; y++) { - iy = (long)y*dH - padH + kh; - if (iy < 0 || iy >= inputHeight) { - memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*outputWidth); - } else { - if (dW==1){ - ix = 0 - padW + kw; - lpad = fmaxf(0,padW-kw); - rpad = fmaxf(0,padW-(kW-kw-1)); - if (outputWidth-rpad-lpad <= 0) { - memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*outputWidth); - } else { - if (lpad > 0) memset(dst+(size_t)y*outputWidth, 0, sizeof(real)*lpad); - memcpy(dst+(size_t)y*outputWidth+lpad, src+(size_t)iy*inputWidth+ix+lpad, sizeof(real)*(outputWidth-rpad-lpad)); - if (rpad > 0) memset(dst+(size_t)y*outputWidth + outputWidth - rpad, 0, sizeof(real)*rpad); - } - } - else{ - for (x=0; x<outputWidth; x++){ - ix = (long)x*dW - padW + kw; - if (ix < 0 || ix >= inputWidth) - memset(dst+(size_t)y*outputWidth+x, 0, sizeof(real)*1); - else - memcpy(dst+(size_t)y*outputWidth+x, src+(size_t)iy*inputWidth+ix, sizeof(real)*(1)); - } - } - } - } - } else { - for(y = 0; y < outputHeight; y++) { - iy = (long)y*dH + kh; - ix = 0 + kw; - if (dW == 1) - memcpy(dst+(size_t)y*outputWidth, src+(size_t)iy*inputWidth+ix, sizeof(real)*outputWidth); - else{ - for (x=0; x<outputWidth; x++) - memcpy(dst+(size_t)y*outputWidth+x, src+(size_t)iy*inputWidth+ix+(long)x*dW, sizeof(real)*(1)); - } - } - } - } -} - -#endif if norm > maxNorm then - local err = norm - maxNorm; - mytester:assertlt(math.abs(err), precision, string.format( - 'output after renorm exceeds maxNorm=[%f] with normType=[%f]', maxNorm, normType)) - end - end - -- check the update of the module.weight - for j = 1,totalIndex do - local k = all_index[j] - if j <= nIndex then -- k is an index in "input" - local norm = torch.norm(module.weight:select(1, k), normType) - local oriNorm = torch.norm(oriW:select(1, k), normType) - if oriNorm > maxNorm then - local err = norm - maxNorm - mytester:assertlt(math.abs(err), precision, 'unexpected norm after renorm') - else - local err = norm - oriNorm - mytester:assertlt(math.abs(err), precision, 'unpexpected norm after renorm') - end - else -- k is not an index in "input" - local err = module.weight:select(1,k):sum() - oriW:select(1,k):sum() - mytester:assertlt(math.abs(err), precision, 'unexpected changes in weight after renorm') - end - end - end -end - -function nntest.AddConstant() - local nbatch = torch.random(3, 5) - local f = torch.random(3, 5) - local h = torch.random(7,9) - local w = torch.random(7,9) - local input = torch.rand(nbatch, f, h, w):mul(20):add(-10) -- [-10, 10] - - local constant = torch.randn(1):squeeze() - local mod = nn.AddConstant(constant) - - -- Test FPROP - local output = mod:forward(input) - local delta = output - input - mytester:assertlt(delta:add(-constant):abs():max(), precision, 'fprop error') - - -- Test BPROP - local err = jac.testJacobian(mod, input) - 
mytester:assertlt(err, precision, 'bprop error ') - - -- inplace comparisons - local ini = math.random(3,5) - local inj = math.random(3,5) - local ink = math.random(3,5) - local constant = torch.uniform()*math.random(1,10) - - local input1 = torch.rand(ink, inj, ini) - local input2 = input1:clone() - - local module1 = nn.AddConstant(constant,true) - local module2 = nn.AddConstant(constant) - - local gradOutput1 = torch.rand(ink, inj, ini) - local gradOutput2 = gradOutput1:clone() - - local out1 = module1:forward(input1) - local out2 = module2:forward(input2) - - mytester:asserteq(0, (out1-out2):abs():max(), torch.typename(module1) .. - ' - in-place forward err ') - - local gradInput1 = module1:backward(input1, gradOutput1) - local gradInput2 = module2:backward(input2, gradOutput2) - - mytester:asserteq(0, (gradInput1-gradInput2):abs():max(), - torch.typename(module1) .. ' - in-place backward err ') - - local input1 = torch.rand(ink, inj, ini) - local input2 = input1:clone() - - module1:forward(input1) - module1:backward(module1.output,torch.rand(input1:size())) - - local err = (input1-input2):abs():max() - mytester:asserteq(err, 0, torch.typename(module1) .. - ' - inplace input change err ') - - local module3 = nn.AddConstant(torch.Tensor{1,2,3}) - local out3 = module3:forward(torch.Tensor{-1,-2,-3}) - mytester:asserteq(0, out3:abs():max(), torch.typename(module3) .. - ' - tensor constant forward err ') - local module4 = nn.AddConstant(torch.Tensor{1,2,3}) - local out4 = module3:forward(torch.Tensor{{-1,-2,-3},{-1,-2,-3}}) - mytester:asserteq(0, out4:abs():max(), torch.typename(module4) .. - ' - batch tensor constant forward err ') -end - -function nntest.MulConstant() - local nbatch = torch.random(3, 5) - local f = torch.random(3, 5) - local h = torch.random(7,9) - local w = torch.random(7,9) - local input = torch.rand(nbatch, f, h, w):mul(20):add(-10) -- [-10, 10] - - local constant = torch.randn(1):squeeze() - local mod = nn.MulConstant(constant) - - -- Test FPROP - local output = mod:forward(input) - local scale = output:clone():cdiv(input) - mytester:assertlt(scale:add(-constant):abs():max(), precision, 'fprop error') - - -- Test BPROP - local err = jac.testJacobian(mod, input) - mytester:assertlt(err, precision, 'bprop error ') - - -- inplace comparisons - local ini = math.random(3,5) - local inj = math.random(3,5) - local ink = math.random(3,5) - local constant = torch.uniform()*math.random(1,10) - - local input1 = torch.rand(ink, inj, ini) - local input2 = input1:clone() - - local module1 = nn.MulConstant(constant,true) - local module2 = nn.MulConstant(constant) - - local gradOutput1 = torch.rand(ink, inj, ini) - local gradOutput2 = gradOutput1:clone() - - local out1 = module1:forward(input1) - local out2 = module2:forward(input2) - - mytester:asserteq(0, (out1-out2):abs():max(), torch.typename(module1) .. - ' - in-place forward err ') - - local gradInput1 = module1:backward(input1, gradOutput1) - local gradInput2 = module2:backward(input2, gradOutput2) - - mytester:asserteq(0, (gradInput1-gradInput2):abs():max(), - torch.typename(module1) .. ' - in-place backward err ') - - local input1 = torch.rand(ink, inj, ini) - local input2 = input1:clone() - - module1:forward(input1) - module1:backward(module1.output,torch.rand(input1:size())) - - local err = (input1-input2):abs():max() - mytester:assertalmosteq(err, 0, 1e-15, torch.typename(module1) .. 
- ' - inplace input change err ') -end - -function nntest.Copy() - local input = torch.randn(3,4):double() - local c = nn.Copy('torch.DoubleTensor', 'torch.FloatTensor') - local output = c:forward(input) - mytester:assert(torch.type(output) == 'torch.FloatTensor', 'copy forward type err') - mytester:assertTensorEq(output, input:float(), 0.000001, 'copy forward value err') - local gradInput = c:backward(input, output) - mytester:assert(torch.type(gradInput) == 'torch.DoubleTensor', 'copy backward type err') - mytester:assertTensorEq(gradInput, input, 0.000001, 'copy backward value err') - c.dontCast = true - c:double() - mytester:assert(torch.type(output) == 'torch.FloatTensor', 'copy forward type err') -end - -function nntest.CMaxTable() - local input1 = torch.Tensor{{1,3},{2,4}} - local input2 = torch.Tensor{{4,2},{3,1}} - local input = {input1, input2} - local module = nn.CMaxTable() - local err1 = torch.add(module:forward(input), -1, torch.Tensor{{4,3},{3,4}}) - mytester:assertalmosteq(err1:abs():max(), 0, 1e-15, "CMaxTable forward call") - local gradOutputs = torch.Tensor{5,6,7,8} - local gradInputs = module:backward(input, gradOutputs) - local err2 = torch.add(gradInputs[1], -1, torch.Tensor{{0,6},{0,8}}) - local err3 = torch.add(gradInputs[2], -1, torch.Tensor{{5,0},{7,0}}) - mytester:assertalmosteq(err2:abs():max(), 0, 1e-15, "CMaxTable backward call") - mytester:assertalmosteq(err3:abs():max(), 0, 1e-15, "CMaxTable backward call") -end - -function nntest.CMinTable() - local input1 = torch.Tensor{{1,3},{2,4}} - local input2 = torch.Tensor{{4,2},{3,1}} - local input = {input1, input2} - local module = nn.CMinTable() - local err1 = torch.add(module:forward(input), -1, torch.Tensor{{1,2},{2,1}}) - mytester:assertalmosteq(err1:abs():max(), 0, 1e-15, "CMinTable forward call") - local gradOutputs = torch.Tensor{5,6,7,8} - local gradInputs = module:backward(input, gradOutputs) - local err2 = torch.add(gradInputs[1], -1, torch.Tensor{{5,0},{7,0}}) - local err3 = torch.add(gradInputs[2], -1, torch.Tensor{{0,6},{0,8}}) - mytester:assertalmosteq(err2:abs():max(), 0, 1e-15, "CMinTable backward call") - mytester:assertalmosteq(err3:abs():max(), 0, 1e-15, "CMinTable backward call") -end - -function nntest.JoinTable() - local tensor = torch.rand(3,4,5) - local input = {tensor, tensor} - local module - for d = 1,tensor:dim() do - module = nn.JoinTable(d) - mytester:asserteq(module:forward(input):size(d), tensor:size(d)*2, "dimension " .. d) - end - - -- Minibatch - local tensor = torch.rand(3,4,5) - local input = {tensor, tensor} - local module - for d = 1,tensor:dim()-1 do - module = nn.JoinTable(d, 2) - mytester:asserteq(module:forward(input):size(d+1), tensor:size(d+1)*2, "dimension " .. d) - end -end - -function nntest.SplitTable() - local input = torch.randn(3,4,5) - local module - for d = 1,input:dim() do - module = nn.SplitTable(d) - mytester:asserteq(#module:forward(input), input:size(d), "dimension " .. d) - end - - -- Minibatch - local input = torch.randn(3,4,5) - local module - for d = 1,input:dim()-1 do - module = nn.SplitTable(d, 2) - mytester:asserteq(#module:forward(input), input:size(d+1), "dimension " .. 
d) - end - - -- Negative indices - local module = nn.SplitTable(-3) - local input = torch.randn(3,4,5) - mytester:asserteq(#module:forward(input), 3, "negative index") - local input = torch.randn(2,3,4,5) - mytester:asserteq(#module:forward(input), 3, "negative index (minibatch)") -end - -function nntest.Select() - -- Test negative Select - local input = torch.Tensor{{4,6,7}, {8,0,1}} - mytester:asserteq(nn.Select(1,-1):forward(input)[1], 8, "negative index") - mytester:asserteq(nn.Select(1,-1):forward(input)[2], 0, "negative index") - mytester:asserteq(nn.Select(1,-2):forward(input)[2], 6, "negative index") - mytester:asserteq(nn.Select(-1,-1):forward(input)[1], 7, "negative dim + negative index") - mytester:asserteq(nn.Select(-1,-1):forward(input)[2], 1, "negative dim + negative index") -end - -function nntest.SelectTable() - local input = { - torch.rand(3,4,5), torch.rand(3,4,5), - {torch.rand(3,4,5)}, - {torch.rand(3,4,5), {torch.rand(3,4,5)}} - } - local gradOutputs = { - torch.rand(3,4,5), torch.rand(3,4,5), - {torch.rand(3,4,5)}, - {torch.rand(3,4,5), {torch.rand(3,4,5)}} - } - local zeros = { - torch.Tensor(3,4,5):zero(), torch.Tensor(3,4,5):zero(), - {torch.Tensor(3,4,5):zero()}, - {torch.Tensor(3,4,5):zero(), {torch.Tensor(3,4,5):zero()}} - } - local nonIdx = {2,3,4,1} - local module - for idx = 1,#input do - module = nn.SelectTable(idx) - local output = module:forward(input) - equal(output, input[idx], "output dimension " .. idx) - local gradInput = module:backward(input, gradOutputs[idx]) - equal(gradInput[idx], gradOutputs[idx], "gradInput[idx] dimension " .. idx) - equal(gradInput[nonIdx[idx]], zeros[nonIdx[idx]], "gradInput[nonIdx] dimension " .. idx) - end - - -- test negative index - local idx = -2 - module = nn.SelectTable(idx) - local output = module:forward(input) - equal(output, input[#input+idx+1], "output dimension " .. idx) - local gradInput = module:backward(input, gradOutputs[#input+idx+1]) - equal(gradInput[#input+idx+1], gradOutputs[#input+idx+1], "gradInput[idx] dimension " .. idx) - equal(gradInput[nonIdx[#input+idx+1]], zeros[nonIdx[#input+idx+1]], "gradInput[nonIdx] dimension " .. idx) - - -- test typecast - local idx = #input - module = nn.SelectTable(idx) - module:float() - local output = module:forward(input) - equal(output, input[idx], "type output") - local gradInput = module:backward(input, gradOutputs[idx]) - equal(gradInput[idx], gradOutputs[idx], "gradInput[idx] dimension " .. idx) - equal(gradInput[nonIdx[idx]], zeros[nonIdx[idx]], "gradInput[nonIdx] dimension " .. 
idx) - - -- test on differently sized sub-input tables given consequetively - local input1 = { - torch.rand(3,4,5), - {torch.rand(3,4,5), torch.rand(3,4,5), torch.rand(3,4,5)} - } - local input2 = { - torch.rand(3,4,5), - {torch.rand(3,4,5), torch.rand(3,4,5)} - } - - module = nn.SelectTable(1) - local output = module:forward(input1) - equal(output, input1[1], "output dimension 1") - local gradInput = module:backward(input1, output) - mytester:assert(#gradInput == #input1, "Table lengths") - mytester:assert(#gradInput[2] == #input1[2], "Sub-Table lengths") - output = module:forward(input2) - equal(output, input2[1], "output dimension 1") - gradInput = module:backward(input2, output) - mytester:assert(#gradInput == #input2, "Table lengths") - mytester:assert(#gradInput[2] == #input2[2], "Sub-Table lengths") - - -- test on tables of increasing size - local input1 = {torch.rand(3,4,5), torch.rand(3,4,5)} - local input2 = {torch.rand(3,4,5), torch.rand(3,4,5), torch.rand(3,4,5)} - local gradOutput1 = torch.randn(3,4,5) - local gradOutput2 = torch.randn(3,4,5) - - local module1 = nn.SelectTable(-1) - local output1 = module1:forward(input1):clone() - local output2 = module1:forward(input2) - local gradInput_ = module1:backward(input1, gradOutput1) - local gradInput1 = {} - for k,v in ipairs(gradInput_) do gradInput1[k] = v:clone() end - local gradInput2 = module1:backward(input2, gradOutput2) - - local module3 = nn.SelectTable(-1) - local module4 = nn.SelectTable(-1) - local output3 = module3:forward(input1) - local output4 = module4:forward(input2) - local gradInput3 = module3:backward(input1, gradOutput1) - local gradInput4 = module4:backward(input2, gradOutput2) - - equal(output1, output3, "output 1 and 3") - equal(output2, output4, "output 2 and 4") - equal(gradInput1, gradInput3, "gradInput 1 and 3") - equal(gradInput2, gradInput4, "gradInput 2 and 4") -end - -function nntest.MixtureTable() - -- 2D - -- expertInput is a Table: - local expertInput = torch.randn(5,3,6) - local gradOutput = torch.randn(5,6) - local input = { - torch.rand(5,3), - {expertInput:select(2,1), expertInput:select(2,2), expertInput:select(2,3)} - } - local module = nn.MixtureTable() - local output = module:forward(input) - local output2 = torch.cmul(input[1]:view(5,3,1):expand(5,3,6), expertInput):sum(2):squeeze(2) - mytester:assertTensorEq(output, output2, 0.000001, "mixture output") - local gradInput = module:backward(input, gradOutput) - local gradOutput2 = torch.view(gradOutput, 5, 1, 6):expandAs(expertInput) - local gaterGradInput2 = torch.cmul(gradOutput2, expertInput):sum(3):select(3,1) - mytester:assertTensorEq(gradInput[1], gaterGradInput2, 0.000001, "mixture gater gradInput") - local expertGradInput2 = torch.cmul(input[1]:view(5,3,1):expand(5,3,6), gradOutput:view(5,1,6):expand(5,3,6)) - for i, expertGradInput in ipairs(gradInput[2]) do - mytester:assertTensorEq(expertGradInput, expertGradInput2:select(2,i), 0.000001, "mixture expert "..i.." 
gradInput") - end - -- expertInput is a Tensor: - local input = {input[1], expertInput} - local module = nn.MixtureTable(2) - local output = module:forward(input) - mytester:assertTensorEq(output, output2, 0.000001, "mixture2 output") - local gradInput = module:backward(input, gradOutput) - mytester:assertTensorEq(gradInput[1], gaterGradInput2, 0.000001, "mixture2 gater gradInput") - mytester:assertTensorEq(gradInput[2], expertGradInput2, 0.000001, "mixture2 expert gradInput") - - -- 3D - local expertInput = torch.randn(5,6,3,2) - local gradOutput = torch.randn(5,6,2) - -- expertInput is a Table: - local input = { - torch.rand(5,3), - {expertInput:select(3,1), expertInput:select(3,2), expertInput:select(3,3)} - } - local module = nn.MixtureTable() - local output = module:forward(input) - local output2 = torch.cmul(input[1]:view(5,1,3,1):expand(5,6,3,2), expertInput):sum(3):squeeze(3) - mytester:assertTensorEq(output, output2, 0.000001, "mixture3 output") - local gradInput = module:backward(input, gradOutput) - local gradOutput2 = torch.view(gradOutput,5,6,1,2):expandAs(expertInput) - local gaterGradInput2 = torch.cmul(gradOutput2, expertInput):sum(4):select(4,1):sum(2):select(2,1) - mytester:assertTensorEq(gradInput[1], gaterGradInput2, 0.000001, "mixture3 gater gradInput") - local expertGradInput2 = torch.cmul(input[1]:view(5,1,3,1):expand(5,6,3,2), gradOutput2) - for i, expertGradInput in ipairs(gradInput[2]) do - mytester:assertTensorEq(expertGradInput, expertGradInput2:select(3,i), 0.000001, "mixture3 expert "..i.." gradInput") - end - -- expertInput is a Tensor - local input = {input[1], expertInput} - local module = nn.MixtureTable(3) - local output = module:forward(input) - mytester:assertTensorEq(output, output2, 0.000001, "mixture4 output") - local gradInput = module:backward(input, gradOutput) - mytester:assertTensorEq(gradInput[1], gaterGradInput2, 0.000001, "mixture4 gater gradInput") - mytester:assertTensorEq(gradInput[2], expertGradInput2, 0.000001, "mixture4 expert gradInput") - - -- 1D - -- expertInput is a Table: - local expertInput = torch.randn(3,6) - local gradOutput = torch.randn(6) - local input = { - torch.rand(3), - {expertInput:select(1,1), expertInput:select(1,2), expertInput:select(1,3)} - } - local module = nn.MixtureTable() - local output = module:forward(input) - local output2 = torch.cmul(input[1]:view(3,1):expand(3,6), expertInput):sum(1):squeeze(1) - mytester:assertTensorEq(output, output2, 0.000001, "mixture5 output") - local gradInput = module:backward(input, gradOutput) - local gradOutput2 = torch.view(gradOutput, 1, 6):expandAs(expertInput) - local gaterGradInput2 = torch.cmul(gradOutput2, expertInput):sum(2):select(2,1) - mytester:assertTensorEq(gradInput[1], gaterGradInput2, 0.000001, "mixture5 gater gradInput") - local expertGradInput2 = torch.cmul(input[1]:view(3,1):expand(3,6), gradOutput:view(1,6):expand(3,6)) - for i, expertGradInput in ipairs(gradInput[2]) do - mytester:assertTensorEq(expertGradInput, expertGradInput2:select(1,i), 0.000001, "mixture5 expert "..i.." 
gradInput") - end - -- test type-cast - module:float() - local input2 = { - input[1]:float(), - {input[2][1]:float(), input[2][2]:float(), input[2][3]:float()} - } - local output = module:forward(input2) - mytester:assertTensorEq(output, output2:float(), 0.000001, "mixture5B output") - local gradInput = module:backward(input2, gradOutput:float()) - mytester:assertTensorEq(gradInput[1], gaterGradInput2:float(), 0.000001, "mixture5B gater gradInput") - for i, expertGradInput in ipairs(gradInput[2]) do - mytester:assertTensorEq(expertGradInput, expertGradInput2:select(1,i):float(), 0.000001, "mixture5B expert "..i.." gradInput") - end - -- expertInput is a Tensor: - local input = {input[1], expertInput} - local module = nn.MixtureTable(1) - local output = module:forward(input) - mytester:assertTensorEq(output, output2, 0.000001, "mixture6 output") - local gradInput = module:backward(input, gradOutput) - mytester:assertTensorEq(gradInput[1], gaterGradInput2, 0.000001, "mixture6 gater gradInput") - mytester:assertTensorEq(gradInput[2], expertGradInput2, 0.000001, "mixture6 expert gradInput") - -- test type-cast: - module:float() - local input2 = {input[1]:float(), expertInput:float()} - local output = module:forward(input2) - mytester:assertTensorEq(output, output2:float(), 0.000001, "mixture6B output") - local gradInput = module:backward(input2, gradOutput:float()) - mytester:assertTensorEq(gradInput[1], gaterGradInput2:float(), 0.000001, "mixture6B gater gradInput") - mytester:assertTensorEq(gradInput[2], expertGradInput2:float(), 0.000001, "mixture6B expert gradInput") - - --2D gater, 1D expert - -- expertInput is a Table: - local expertInput = torch.randn(5,3) - local gradOutput = torch.randn(5) - local input = { - torch.rand(5,3), - {expertInput:select(2,1), expertInput:select(2,2), expertInput:select(2,3)} - } - local module = nn.MixtureTable() - local output = module:forward(input) - local output2 = torch.cmul(input[1], expertInput):sum(2):squeeze(2) - mytester:assertTensorEq(output, output2, 0.000001, "mixture7 output") - local gradInput = module:backward(input, gradOutput) - local gradOutput2 = torch.view(gradOutput, 5, 1):expandAs(expertInput) - local gaterGradInput2 = torch.cmul(gradOutput2, expertInput) - mytester:assertTensorEq(gradInput[1], gaterGradInput2, 0.000001, "mixture7 gater gradInput") - local expertGradInput2 = torch.cmul(input[1], gradOutput:view(5,1):expand(5,3)) - for i, expertGradInput in ipairs(gradInput[2]) do - mytester:assertTensorEq(expertGradInput, expertGradInput2:select(2,i), 0.000001, "mixture7 expert "..i.." 
gradInput") - end -end - -function nntest.Narrow() - -- check basic narrow functionality #1 - local input = torch.rand(9, 4, 14) - local output = input:narrow(1, 3, 5) - local gradOutput = torch.rand(5, 4, 14) - local gradInput = torch.zeros(9, 4, 14) - gradInput:narrow(1, 3, 5):copy(gradOutput) - local module1 = nn.Narrow(1, 3, 5) - local output1 = module1:forward(input) - local gradInput1 = module1:backward(input, gradOutput) - local module2 = nn.Narrow(1, 3, -3) - local output2 = module2:forward(input) - local gradInput2 = module2:backward(input, gradOutput) - mytester:assertTensorEq(output, output1, 0.0000001, "Narrow #1 output err") - mytester:assertTensorEq(gradInput, gradInput1, 0.00001, "Narrow #1 gradInput err") - mytester:assertTensorEq(output, output2, 0.0000001, "Narrow #1 negative output err") - mytester:assertTensorEq(gradInput, gradInput2, 0.00001, "Narrow #1 negative gradInput err") - - -- check basic narrow functionality #2 - local input = torch.rand(3, 10, 4) - local output = input:narrow(2, 5, 3) - local gradOutput = torch.rand(3, 3, 4) - local gradInput = torch.zeros(3, 10, 4) - gradInput:narrow(2, 5, 3):copy(gradOutput) - local module1 = nn.Narrow(2, 5, 3) - local output1 = module1:forward(input) - local gradInput1 = module1:backward(input, gradOutput) - local module2 = nn.Narrow(2, 5, -4) - local output2 = module2:forward(input) - local gradInput2 = module2:backward(input, gradOutput) - mytester:assertTensorEq(output, output1, 0.0000001, "Narrow #2 output err") - mytester:assertTensorEq(gradInput, gradInput1, 0.00001, "Narrow #2 gradInput err") - mytester:assertTensorEq(output, output2, 0.0000001, "Narrow #2 negative output err") - mytester:assertTensorEq(gradInput, gradInput2, 0.00001, "Narrow #2 negative gradInput err") - - -- check basic narrow functionality #3 - local input = torch.rand(6, 11, 7) - local output = input:narrow(3, 1, 1) - local gradOutput = torch.rand(6, 11, 1) - local gradInput = torch.zeros(6, 11, 7) - gradInput:narrow(3, 1, 1):copy(gradOutput) - local module1 = nn.Narrow(3, 1, 1) - local output1 = module1:forward(input) - local gradInput1 = module1:backward(input, gradOutput) - local module2 = nn.Narrow(3, 1, -7) - local output2 = module2:forward(input) - local gradInput2 = module2:backward(input, gradOutput) - mytester:assertTensorEq(output, output1, 0.0000001, "Narrow #3 output err") - mytester:assertTensorEq(gradInput, gradInput1, 0.00001, "Narrow #3 gradInput err") - mytester:assertTensorEq(output, output2, 0.0000001, "Narrow #3 negative output err") - mytester:assertTensorEq(gradInput, gradInput2, 0.00001, "Narrow #3 negative gradInput err") - - -- check basic narrow functionality #4 - local input = torch.rand(3, 10, 4) - local output = input:narrow(2, 5, 3) - local gradOutput = torch.rand(3, 3, 4) - local gradInput = torch.zeros(3, 10, 4) - gradInput:narrow(2, 5, 3):copy(gradOutput) - local module1 = nn.Narrow(-2, 5, 3) - local output1 = module1:forward(input) - local gradInput1 = module1:backward(input, gradOutput) - local module2 = nn.Narrow(-2, 5, -4) - local output2 = module2:forward(input) - local gradInput2 = module2:backward(input, gradOutput) - mytester:assertTensorEq(output, output1, 0.0000001, "Narrow #4 output err") - mytester:assertTensorEq(gradInput, gradInput1, 0.00001, "Narrow #4 gradInput err") - mytester:assertTensorEq(output, output2, 0.0000001, "Narrow #4 negative output err") - mytester:assertTensorEq(gradInput, gradInput2, 0.00001, "Narrow #4 negative gradInput err") - - -- check narrow negative offset - local input = 
torch.rand(3, 10, 4) - local output = input:narrow(2, 1, 3) - local gradOutput = torch.rand(3, 3, 4) - local gradInput = torch.zeros(3, 10, 4) - gradInput:narrow(2, 1, 3):copy(gradOutput) - local module1 = nn.Narrow(2, -1, 7) - local output1 = module1:forward(input) - local gradInput1 = module1:backward(input, gradOutput) - local module2 = nn.Narrow(2, 1, 3) - local output2 = module2:forward(input) - local gradInput2 = module2:backward(input, gradOutput) - mytester:assertTensorEq(output, output1, 0.0000001, "Narrow #5 output err") - mytester:assertTensorEq(gradInput, gradInput1, 0.00001, "Narrow #5 gradInput err") - mytester:assertTensorEq(output, output2, 0.0000001, "Narrow #5 negative output err") - mytester:assertTensorEq(gradInput, gradInput2, 0.00001, "Narrow #5 negative gradInput err") -end - -function nntest.NarrowTable() - local input = torch.randn(3,10,4) - local gradOutput = torch.randn(3,3,4) - local nt = nn.NarrowTable(5,3) - local seq = nn.Sequential() - seq:add(nn.SplitTable(1,2)) - seq:add(nt) - seq:add(nn.JoinTable(1,1)) - seq:add(nn.Reshape(3,3,4)) - local seq2 = nn.Narrow(2,5,3) - local output = seq:forward(input) - local gradInput = seq:backward(input, gradOutput) - local output2 = seq2:forward(input) - local gradInput2 = seq2:backward(input, gradOutput) - mytester:assertTensorEq(output, output2, 0.0000001, "NarrowTable output err") - mytester:assertTensorEq(gradInput, gradInput2, 0.00001, "NarrowTable gradInput err") - - -- now try it with a smaller input - local input = input:narrow(2, 1, 8) - local output = seq:forward(input) - local gradInput = seq:backward(input, gradOutput) - local output2 = seq2:forward(input) - local gradInput2 = seq2:backward(input, gradOutput) - mytester:assertTensorEq(output, output2, 0.0000001, "NarrowTable small output err") - mytester:assertTensorEq(gradInput, gradInput2, 0.00001, "NarrowTable small gradInput err") - - -- test type-cast - local input = input:float() - local gradOutput = gradOutput:float() - seq:float() - seq2:float() - local output = seq:forward(input) - local gradInput = seq:backward(input, gradOutput) - local output2 = seq2:forward(input) - local gradInput2 = seq2:backward(input, gradOutput) - mytester:assertTensorEq(output, output2, 0.0000001, "NarrowTable output float err") - mytester:assertTensorEq(gradInput, gradInput2, 0.00001, "NarrowTable gradInput float err") -end - -function nntest.View() - local input = torch.rand(10) - local template = torch.rand(5,2) - local target = template:size():totable() - local module = nn.View(template:size()) - mytester:assertTableEq(module:forward(input):size():totable(), target, "Error in forward (1)") - local module = nn.View(table.unpack(target)) - mytester:assertTableEq(module:forward(input):size():totable(), target, "Error in forward (2)") - - -- Minibatch - local minibatch = torch.rand(5,10) - mytester:asserteq(module:forward(minibatch):size(1), - minibatch:size(1), - "Error in minibatch dimension") - mytester:asserteq(module:forward(minibatch):nElement(), - minibatch:nElement(), - "Error in minibatch nElement") - local module = nn.View(-1):setNumInputDims(1) - mytester:asserteq(module:forward(minibatch):size(1), - minibatch:size(1), - "Error in minibatch dimension with size -1") - mytester:asserteq(module:forward(minibatch):nElement(), - minibatch:nElement(), - "Error in minibatch nElement with size -1") - - -- another setNumInputDims case - local minibatch = torch.rand(5,4,10) - local module = nn.View(-1):setNumInputDims(2) - 
mytester:asserteq(module:forward(minibatch):size(1), - minibatch:size(1), - "Error in minibatch dimension with size -1") - - -- another setNumInputDims case - local minibatch = torch.rand(2,5,4,10) - local module = nn.View(4,-1):setNumInputDims(2) - local out = module:forward(minibatch) - mytester:asserteq(out:size(1), minibatch:size(1)*minibatch:size(2), - "Error in minibatch dimension with size -1") - mytester:asserteq(out:size(2), minibatch:size(3), - "Error in minibatch dimension with size -1") - mytester:asserteq(out:size(3), minibatch:size(4), - "Error in minibatch dimension with size -1") - - -- Minibatch Generalization - local minibatch = torch.rand(5,2,6) - local module = nn.View(6) - mytester:asserteq( - module:forward(minibatch):size(1), - minibatch:size(1)*minibatch:size(2), - "Error in minibatch generalization dimension") - mytester:asserteq( - module:forward(minibatch):nElement(), - minibatch:nElement(), - "Error in minibatch generalization nElement") -end - -function nntest.Reshape() - local input = torch.rand(10) - local template = torch.rand(5,2) - local target = template:size():totable() - local module = nn.Reshape(template:size()) - mytester:assertTableEq(module:forward(input):size():totable(), target, "Error in forward (1)") - local module = nn.Reshape(table.unpack(target)) - mytester:assertTableEq(module:forward(input):size():totable(), target, "Error in forward (2)") - - -- Minibatch - local minibatch = torch.rand(5,10) - mytester:asserteq(module:forward(minibatch):size(1), - minibatch:size(1), - "Error in minibatch dimension") - mytester:asserteq(module:forward(minibatch):nElement(), - minibatch:nElement(), - "Error in minibatch nElement") -end - --- Define a test for SpatialUpSamplingNearest -function nntest.SpatialUpSamplingNearest() - local scale = torch.random(2,4) - for dim = 3,4 do - local m = nn.SpatialUpSamplingNearest(scale) - - -- Create a randomly sized dimD tensor - local shape = {} - for i = 1, dim do - table.insert(shape, torch.random(2, 2+dim-1)) - end - - -- Check that the gradient is correct by using finite differences - local input = torch.Tensor(table.unpack(shape)):zero() - - local err = jac.testJacobian(m, input) - mytester:assertlt(err, precision, ' error on state ') - - local ferr, berr = jac.testIO(m, input) - mytester:asserteq(ferr, 0, torch.typename(m)..' - i/o forward err ') - mytester:asserteq(berr, 0, torch.typename(m)..' - i/o backward err ') - end -end - -function nntest.SpatialUpSamplingBilinear() - for scale=2,4 do - for dim = 3,4 do - local m = nn.SpatialUpSamplingBilinear(scale) - - -- Create a randomly sized dimD tensor - local shape = {} - for i = 1, dim do - table.insert(shape, torch.random(2, 2+dim-1)) - end - - -- Check that the gradient is correct by using finite differences - local input = torch.DoubleTensor(table.unpack(shape)):normal() - - local err = jac.testJacobian(m, input) - mytester:assertlt(err, precision, ' error on state ') - - local ferr, berr = jac.testIO(m, input) - mytester:asserteq(ferr, 0, torch.typename(m)..' - i/o forward err ') - mytester:asserteq(berr, 0, torch.typename(m)..' 
- i/o backward err ') - end - end -end - -function nntest.Concat() - local input = torch.randn(4, 2) - local num_modules = math.random(2, 5) - local linears = {} - for i = 1,num_modules do - linears[i] = nn.Linear(2,5) - end - - local m = nn.Concat(1) - for _,module in ipairs(linears) do - m:add(module) - module:zeroGradParameters() - module.weight:fill(1) - module.bias:fill(0) - end - mytester:asserteq(m:size(), num_modules) - - local output = m:forward(input) - local output2 = input:sum(2):expand(4, 5):repeatTensor(num_modules, 1) - mytester:assertTensorEq(output2, output, 0.000001, 'Concat forward err') - - local gradInput = m:backward(input, torch.ones(output2:size())) - local gradInput2 = torch.ones(4, 2):fill(num_modules * 5) - mytester:assertTensorEq(gradInput, gradInput2, 0.000001, 'Concat backward err (gradInput)') - - local gradWeight = input:sum(1):expand(5, 2) - for _,module in ipairs(linears) do - mytester:assertTensorEq(gradWeight, module.gradWeight, 0.000001, 'Concat backward err (gradWeight)') - end -end - -function nntest.Parallel() - local input = torch.randn(3, 4, 5) - local m = nn.Parallel(1,3) - m:add(nn.View(4,5,1)) - m:add(nn.View(4,5,1)) - m:add(nn.View(4,5,1)) - - local output = m:forward(input) - local output2 = input:transpose(1,3):transpose(1,2) - mytester:assertTensorEq(output2, output, 0.000001, 'Parallel forward err') - - local gradInput = m:backward(input, output2) - mytester:assertTensorEq(gradInput, input, 0.000001, 'Parallel backward err') -end - -function nntest.ParallelTable() - local input = torch.randn(3, 4, 5) - local p = nn.ParallelTable() - p:add(nn.View(4,5,1)) - p:add(nn.View(4,5,1)) - p:add(nn.View(4,5,1)) - local m = nn.Sequential() - m:add(nn.SplitTable(1)) - m:add(p) - m:add(nn.JoinTable(3)) - - local output = m:forward(input) - local output2 = input:transpose(1,3):transpose(1,2) - mytester:assertTensorEq(output2, output, 0.000001, 'ParallelTable forward err') - - local gradInput = m:backward(input, output2) - mytester:assertTensorEq(gradInput, input, 0.000001, 'ParallelTable backward err') -end - -function nntest.ConcatTable() - -- Test tensor input - local input = torch.rand(5, 5, 5) - local m = nn.Sequential() - - local concat = nn.ConcatTable() - concat:add(nn.Identity()) - - m:add(concat) -- Output of concat is a table of length 1 - m:add(nn.JoinTable(1)) -- jac needs a tensor output - - local err = jac.testJacobian(m, input) - mytester:assertlt(err, precision, ' error on state ') - - local ferr, berr = jac.testIO(m, input) - mytester:asserteq(ferr, 0, torch.typename(m)..' - i/o forward err ') - mytester:asserteq(berr, 0, torch.typename(m)..' 
- i/o backward err ') - - -- Now test a table input - local input = { - torch.randn(3,4):float(), torch.randn(3,4):float(), {torch.randn(3,4):float()} - } - local _gradOutput = { - torch.randn(3,3,4):float(), torch.randn(3,3,4):float(), torch.randn(3,3,4):float() - } - local gradOutput = { - {_gradOutput[1][1], _gradOutput[2][1], {_gradOutput[3][1]}}, - {_gradOutput[1][2], _gradOutput[2][2], {_gradOutput[3][2]}}, - {_gradOutput[1][3], _gradOutput[2][3], {_gradOutput[3][3]}} - } - local module = nn.ConcatTable() - module:add(nn.Identity()) - module:add(nn.Identity()) - module:add(nn.Identity()) - module:float() - - local output = module:forward(input) - local output2 = {input, input, input} - equal(output2, output, "ConcatTable table output") - local gradInput = module:backward(input, gradOutput) - local gradInput2 = {_gradOutput[1]:sum(1):squeeze(1), _gradOutput[2]:sum(1):squeeze(1), {_gradOutput[3]:sum(1):squeeze(1)}} - equal(gradInput, gradInput2, "ConcatTable table gradInput") - - -- test outputs for variable length inputs - local test = nn.ConcatTable() - test:add(nn.Identity()) - test:add(nn.Identity()) - - local x = {torch.randn(5), torch.randn(5)} - local y = {torch.randn(5)} - - local o1 = #(test:forward(x)) - local go1 = #(test:backward(x, {x, x})) - local o2 = #(test:forward(y)) - local go2 = #(test:backward(y, {y, y})) - mytester:assert(o1 == 2, "ConcatTable table variable length") - mytester:assert(go1 == 2, "ConcatTable table variable length") - mytester:assert(o2 == 2, "ConcatTable table variable length") - mytester:assert(go2 == 1, "ConcatTable table variable length") -end - -function nntest.MapTable() - local map = nn.MapTable(nn.Linear(10,5)) - local lin = map:get(1):clone() - - -- ParallelTable with clones as reference - local parallel = nn.ParallelTable() - parallel:add(lin) - parallel:add(lin:clone('weight','bias')) - parallel:add(lin:clone('weight','bias')) - - local input = {torch.rand(10), torch.rand(10), torch.rand(10)} - local gradOutput = {torch.ones(5), torch.ones(5), torch.ones(5)} - - local outputM = map:forward(input) - local outputP = parallel:forward(input) - mytester:assertTensorEq(outputM[1], outputP[1]) - mytester:assertTensorEq(outputM[2], outputP[2]) - mytester:assertTensorEq(outputM[3], outputP[3]) - mytester:assert(map:size() == #input) - - map:zeroGradParameters() - parallel:zeroGradParameters() - local gradInputM = map:backward(input, gradOutput) - local gradInputP = parallel:backward(input, gradOutput) - mytester:assertTensorEq(gradInputM[1], gradInputP[1]) - mytester:assertTensorEq(gradInputM[2], gradInputP[2]) - mytester:assertTensorEq(gradInputM[3], gradInputP[3]) - - map:updateParameters(1) - parallel:updateParameters(1) - mytester:assertTensorEq(map:get(1).weight, parallel:get(1).weight, 0.00001) - - local output = map:forward({input[1], input[2], input[3], input[3]}) - mytester:assert(#output == 4) - local output = map:forward({input[1], input[2]}) - mytester:assert(#output == 2) - - map:resize(10) - mytester:assert(map:size() == 10) - map:resize(4) - mytester:assert(map:size() == 4) - mytester:assert(torch.pointer(map:get(4).weight:storage()) - == torch.pointer(map:get(1).weight:storage())) - map:clearState() - mytester:assert(map:size() == 1) - - -- check if gradients are correctly reset - -- share weights and gradients - map = nn.MapTable(nn.Linear(10,5)) - map:forward(input) - _, gradParams = map:getParameters() - gradParams:uniform() - map:zeroGradParameters() - mytester:assertlt(gradParams:sum(),precision) - - -- check if gradients are 
correctly reset - -- do not share weights and gradients - map = nn.MapTable(nn.Linear(10,5),false) - map:forward(input) - _, gradParams = map:getParameters() - gradParams:uniform() - map:zeroGradParameters() - mytester:assertlt(gradParams:sum(),precision) -end - -function nntest.FlattenTable() - -- Create a nested table. Obviously we can't even stochastically test - -- the space of all possible nested tables (it's infinite), but here is a - -- hand-coded one that covers all the cases we need: - local input = { - torch.rand(1), - { - torch.rand(2), - { - torch.rand(3) - }, - }, - torch.rand(4) - } - local gradOutput = { - torch.rand(1), - torch.rand(2), - torch.rand(3), - torch.rand(4) - } - - -- Check the FPROP - local m = nn.FlattenTable() - local output = m:forward(input) - mytester:assert(#output == 4, torch.typename(m)..' - fprop err ') - -- This is ugly, but check that the mapping from input to output is correct - mytester:assert(output[1] == input[1]) - mytester:assert(output[2] == input[2][1]) - mytester:assert(output[3] == input[2][2][1]) - mytester:assert(output[4] == input[3]) - - -- Check the BPROP - local gradInput = m:backward(input, gradOutput) - -- Again, check that the mapping is correct - mytester:assert(gradOutput[1] == gradInput[1]) - mytester:assert(gradOutput[2] == gradInput[2][1]) - mytester:assert(gradOutput[3] == gradInput[2][2][1]) - mytester:assert(gradOutput[4] == gradInput[3]) - - -- More ugliness: FlattenTable doesn't rebuild the table every updateOutput - -- call, so we need to make sure that modifications to the input are - -- detected correctly (and that the table is correctly rebuilt). - -- CASE 1: Nothing changes so the output table shouldn't be redefined - local old_input_map = m.input_map - local old_output = m.output - local _ = m:forward(input) - mytester:assert(old_input_map == m.input_map and old_output == m.output) - - -- CASE 2: An element is added to the input table - old_input_map = m.input_map - old_output = m.output - input[2][#(input[2])+1] = torch.rand(5) - m:forward(input) - mytester:assert(old_input_map ~= m.input_map and old_output ~= m.output) - - -- CASE 3: An element is removed from the input table - old_input_map = m.input_map - old_output = m.output - input[#input] = nil - m:forward(input) - mytester:assert(old_input_map ~= m.input_map and old_output ~= m.output) - - -- At this point further testing is not necessary I think, but just to be - -- consistent: perform a jacobian test by using SplitTable and JoinTable - -- elements - m = nn.Sequential() - local par = nn.ParallelTable() - par:add(nn.SplitTable(1)) - par:add(nn.SplitTable(1)) - m:add(nn.SplitTable(1)) - m:add(par) -- this will create a nested table - m:add(nn.FlattenTable()) -- This will flatten the nested table - m:add(nn.JoinTable(1)) -- Finally, this will create a 1D tensor - - input = torch.Tensor(2,2,2) - local err = jac.testJacobian(m, input) - mytester:assertlt(err, precision, 'error on bprop ') -end - -function nntest.L1Penalty() - local weight = 1 - local sizeAverage = false - local m = nn.L1Penalty(weight, sizeAverage, false) - - local input = torch.rand(2,10):add(-0.5) - input[1][1] = 0 - - local _ = m:forward(input) - local grad = m:backward(input, torch.ones(input:size())) - - local err = input:clone():abs():sum()*weight - m.loss - mytester:assertlt(math.abs(err), precision, 'error on fprop ') - - local true_grad = (input:gt(0):typeAs(grad) + - input:lt(0):typeAs(grad):mul(-1)):mul(weight) - mytester:assertlt((true_grad - grad):abs():max(), precision, - 'error 
on bprop ') - - -- Note: We cannot use the Jacobian test for this Module since the backward - -- gradient cannot be estimated using finite differences (i.e., the loss - -- during BPROP is not included in the FPROP output) -end - -function nntest.L1Cost() - local input = torch.rand(10) * 2 - 1 - local m = nn.L1Cost() - local output = m:forward(input) - local err = output - torch.abs(input):sum() - mytester:assertalmosteq(err, 0, 1e-15, 'L1Cost forward') -end - -function nntest.DepthConcat() - local outputSize = torch.IntTensor{5,6,7,8} - local input = torch.randn(2,3,12,12) - local gradOutput = torch.randn(2, outputSize:sum(), 12, 12) - local concat = nn.DepthConcat(2) - concat:add(nn.SpatialConvolutionMM(3, outputSize[1], 1, 1, 1, 1)) --> 2, 5, 12, 12 - concat:add(nn.SpatialConvolutionMM(3, outputSize[2], 3, 3, 1, 1)) --> 2, 6, 10, 10 - concat:add(nn.SpatialConvolutionMM(3, outputSize[3], 4, 4, 1, 1)) --> 2, 7, 9, 9 - concat:add(nn.SpatialConvolutionMM(3, outputSize[4], 5, 5, 1, 1)) --> 2, 8, 8, 8 - concat:zeroGradParameters() - -- forward/backward - local outputConcat = concat:forward(input) - local gradInputConcat = concat:backward(input, gradOutput) - -- the spatial dims are the largest, the nFilters is the sum - local output = torch.Tensor(2, outputSize:sum(), 12, 12):zero() -- zero for padding - local narrows = { {{},{1,5},{},{}}, {{},{6,11},{2,11},{2,11}}, {{},{12,18},{2,10},{2,10}}, {{},{19,26},{3,10},{3,10}} } - local gradInput = input:clone():zero() - for i=1,4 do - local conv = concat:get(i) - local gradWeight = conv.gradWeight:clone() - conv:zeroGradParameters() - output[narrows[i]]:copy(conv:forward(input)) - gradInput:add(conv:backward(input, gradOutput[narrows[i]])) - mytester:assertTensorEq(gradWeight, conv.gradWeight, 0.000001, "Error in DepthConcat:accGradParameters for conv "..i) - end - mytester:assertTensorEq(output, outputConcat, 0.000001, "Error in DepthConcat:updateOutput") - mytester:assertTensorEq(gradInput, gradInputConcat, 0.000001, "Error in DepthConcat:updateGradInput") -end - -function nntest.MV() - local mv = nn.MV(false) - local outdim = torch.random(10,20) - local indim = torch.random(10,20) - local M = torch.randn(outdim, indim) - local V = torch.randn(indim) - - -- Test forward pass. - local output = mv:forward({M, V}) - mytester:assertTableEq(output:size():totable(), {outdim}, - 'Output has wrong dimensionality') - mytester:assertTensorEq(output, M * V, 1e-10, - 'Wrong output') - - -- Test backward pass. - local gradOutput = torch.randn(outdim) - local gradInput = mv:backward({M, V}, gradOutput) - mytester:assert(#gradInput == 2, 'gradInput must be table of size 2') - local gradM, gradV = table.unpack(gradInput) - mytester:assertTableEq(gradM:size():totable(), M:size():totable(), - 'Gradient for input M has wrong size') - mytester:assertTableEq(gradV:size():totable(), V:size():totable(), - 'Gradient for input V has wrong size') - mytester:assertTensorEq(gradM, torch.ger(gradOutput, V), 1e-10, - 'Wrong gradient for input M') - -- d/dV(j) of sum_i M(i,j)*V(j) picks out column M(:,j), hence gradV = M:t() * gradOutput - mytester:assertTensorEq(gradV, M:t() * gradOutput, 1e-10, - 'Wrong gradient for input V') -end - -function nntest.BatchMVNoTranspose() - local mv = nn.MV() - local outdim = torch.random(10,20) - local indim = torch.random(10,20) - for bSize = 1, 11, 5 do - local M = torch.randn(bSize, outdim, indim) - local V = torch.randn(bSize, indim) - - -- Test forward pass. 
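- -- (editor note) As in nntest.MV above, for y = M*V the analytic gradients - -- being checked are dL/dM = gradOutput (x) V (an outer product, torch.ger) - -- and dL/dV = M:t() * gradOutput, applied here to each batch element.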
- local output = mv:forward({M, V}) - mytester:assertTableEq(output:size():totable(), {bSize, outdim}, - 'Output has wrong dimensionality') - for i = 1, bSize do - mytester:assertTensorEq(output[i], M[i] * V[i], 1e-10, - 'Output wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - - -- Test backward pass. - local gradOutput = torch.randn(bSize, outdim) - local gradInput = mv:backward({M, V}, gradOutput) - mytester:assert(#gradInput == 2, 'gradInput must be table of size 2') - local gradM, gradV = table.unpack(gradInput) - mytester:assertTableEq(gradM:size():totable(), M:size():totable(), - 'Gradient for input M has wrong size') - mytester:assertTableEq(gradV:size():totable(), V:size():totable(), - 'Gradient for input V has wrong size') - for i = 1, bSize do - mytester:assertTensorEq(gradM[i], torch.ger(gradOutput[i], V[i]), 1e-10, - 'Gradient for input M wrong for bSize = ' .. bSize .. ' and i = ' .. i) - mytester:assertTensorEq(gradV[i], M[i]:t() * gradOutput[i], 1e-10, - 'Gradient for input V wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - end -end - -function nntest.BatchMVTranspose() - local mv = nn.MV(true) - local outdim = torch.random(10,20) - local indim = torch.random(10,20) - for bSize = 1, 11, 5 do - local M = torch.randn(bSize, indim, outdim) - local V = torch.randn(bSize, indim) - - -- Test forward pass. - local output = mv:forward({M, V}) - mytester:assertTableEq(output:size():totable(), {bSize, outdim}, - 'Output has wrong dimensionality') - for i = 1, bSize do - mytester:assertTensorEq(output[i], M[i]:t() * V[i], 1e-10, - 'Output wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - - -- Test backward pass. - local gradOutput = torch.randn(bSize, outdim) - local gradInput = mv:backward({M, V}, gradOutput) - mytester:assert(#gradInput == 2, 'gradInput must be table of size 2') - local gradM, gradV = table.unpack(gradInput) - mytester:assertTableEq(gradM:size():totable(), M:size():totable(), - 'Gradient for input M has wrong size') - mytester:assertTableEq(gradV:size():totable(), V:size():totable(), - 'Gradient for input V has wrong size') - for i = 1, bSize do - mytester:assertTensorEq(gradM[i], torch.ger(V[i], gradOutput[i]), 1e-10, - 'Gradient for input M wrong for bSize = ' .. bSize .. ' and i = ' .. i) - mytester:assertTensorEq(gradV[i], M[i] * gradOutput[i], 1e-10, - 'Gradient for input V wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - end -end - -local function createMatrixInputSizes() - local M = torch.random(10, 20) - local N = torch.random(10, 20) - local P = torch.random(10, 20) - return M, N, P -end - -function nntest.MM() - local mm = nn.MM(false, true) - local M, N, P = createMatrixInputSizes() - local A = torch.randn(M, N) - local B = torch.randn(P, N) - - -- Test forward pass. - local output = mm:forward({A, B}) - mytester:assertTableEq(output:size():totable(), {M, P}, - 'Output has wrong dimensionality') - mytester:assertTensorEq(output, A * B:t(), 1e-10, - 'Wrong output') - - -- Test backward pass. 
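- -- (editor note) For C = A * B:t() (nn.MM(false, true)) the expected gradients - -- below follow from matrix calculus: dL/dA = gradOutput * B (MxP times PxN) - -- and dL/dB = gradOutput:t() * A (PxM times MxN).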
- local gradOutput = torch.randn(M, P) - local gradInput = mm:backward({A, B}, gradOutput) - mytester:assert(#gradInput == 2, 'gradInput must be table of size 2') - local gradA, gradB = table.unpack(gradInput) - mytester:assertTableEq(gradA:size():totable(), A:size():totable(), - 'Gradient for input A has wrong size') - mytester:assertTableEq(gradB:size():totable(), B:size():totable(), - 'Gradient for input B has wrong size') - mytester:assertTensorEq(gradA, gradOutput * B, 1e-10, - 'Wrong gradient for input A') - mytester:assertTensorEq(gradB, gradOutput:t() * A, 1e-10, - 'Wrong gradient for input B') -end - -function nntest.BatchMMNoTranspose() - local mm = nn.MM() - local M, N, P = createMatrixInputSizes() - for bSize = 1, 11, 5 do - local A = torch.randn(bSize, M, N) - local B = torch.randn(bSize, N, P) - - -- Test forward pass. - local output = mm:forward({A, B}) - mytester:assertTableEq(output:size():totable(), {bSize, M, P}, - 'Output has wrong dimensionality') - for i = 1, bSize do - mytester:assertTensorEq(output[i], A[i] * B[i], 1e-10, - 'Output wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - - -- Test backward pass. - local gradOutput = torch.randn(bSize, M, P) - local gradInput = mm:backward({A, B}, gradOutput) - mytester:assert(#gradInput == 2, 'gradInput must be table of size 2') - local gradA, gradB = table.unpack(gradInput) - mytester:assertTableEq(gradA:size():totable(), A:size():totable(), - 'Gradient for input A has wrong size') - mytester:assertTableEq(gradB:size():totable(), B:size():totable(), - 'Gradient for input B has wrong size') - for i = 1, bSize do - mytester:assertTensorEq(gradA[i], gradOutput[i] * B[i]:t(), 1e-10, - 'Gradient for input A wrong for bSize = ' .. bSize .. ' and i = ' .. i) - mytester:assertTensorEq(gradB[i], A[i]:t() * gradOutput[i], 1e-10, - 'Gradient for input B wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - end -end - -function nntest.BatchMMTransposeA() - local mm = nn.MM(true, false) - local M, N, P = createMatrixInputSizes() - for bSize = 1, 11, 5 do - local A = torch.randn(bSize, N, M) - local B = torch.randn(bSize, N, P) - - -- Test forward pass. - local output = mm:forward({A, B}) - mytester:assertTableEq(output:size():totable(), {bSize, M, P}, - 'Output has wrong dimensionality') - for i = 1, bSize do - mytester:assertTensorEq(output[i], A[i]:t() * B[i], 1e-10, - 'Output wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - - -- Test backward pass. - local gradOutput = torch.randn(bSize, M, P) - local gradInput = mm:backward({A, B}, gradOutput) - mytester:assert(#gradInput == 2, 'gradInput must be table of size 2') - local gradA, gradB = table.unpack(gradInput) - mytester:assertTableEq(gradA:size():totable(), A:size():totable(), - 'Gradient for input A has wrong size') - mytester:assertTableEq(gradB:size():totable(), B:size():totable(), - 'Gradient for input B has wrong size') - for i = 1, bSize do - mytester:assertTensorEq(gradA[i], B[i] * gradOutput[i]:t(), 1e-10, - 'Gradient for input A wrong for bSize = ' .. bSize .. ' and i = ' .. i) - mytester:assertTensorEq(gradB[i], A[i] * gradOutput[i], 1e-10, - 'Gradient for input B wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - end -end - -function nntest.BatchMMTransposeB() - local mm = nn.MM(false, true) - local M, N, P = createMatrixInputSizes() - for bSize = 1, 11, 5 do - local A = torch.randn(bSize, M, N) - local B = torch.randn(bSize, P, N) - - -- Test forward pass. 
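- -- (editor note) Common pattern across the BatchMM* tests: nn.MM(transA, transB) - -- computes op(A) * op(B), where op(X) = X:t() when the matching flag is true, - -- and each batch element is checked against the equivalent torch expression.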
- local output = mm:forward({A, B}) - mytester:assertTableEq(output:size():totable(), {bSize, M, P}, - 'Output has wrong dimensionality') - for i = 1, bSize do - mytester:assertTensorEq(output[i], A[i] * B[i]:t(), 1e-10, - 'Output wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - - -- Test backward pass. - local gradOutput = torch.randn(bSize, M, P) - local gradInput = mm:backward({A, B}, gradOutput) - mytester:assert(#gradInput == 2, 'gradInput must be table of size 2') - local gradA, gradB = table.unpack(gradInput) - mytester:assertTableEq(gradA:size():totable(), A:size():totable(), - 'Gradient for input A has wrong size') - mytester:assertTableEq(gradB:size():totable(), B:size():totable(), - 'Gradient for input B has wrong size') - for i = 1, bSize do - mytester:assertTensorEq(gradA[i], gradOutput[i] * B[i], 1e-10, - 'Gradient for input A wrong for bSize = ' .. bSize .. ' and i = ' .. i) - mytester:assertTensorEq(gradB[i], gradOutput[i]:t() * A[i], 1e-10, - 'Gradient for input B wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - end -end - -function nntest.BatchMMTransposeBoth() - local mm = nn.MM(true, true) - local M, N, P = createMatrixInputSizes() - for bSize = 1, 11, 5 do - local A = torch.randn(bSize, N, M) - local B = torch.randn(bSize, P, N) - - -- Test forward pass. - local output = mm:forward({A, B}) - mytester:assertTableEq(output:size():totable(), {bSize, M, P}, - 'Output has wrong dimensionality') - for i = 1, bSize do - mytester:assertTensorEq(output[i], A[i]:t() * B[i]:t(), 1e-10, - 'Output wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - - -- Test backward pass. - local gradOutput = torch.randn(bSize, M, P) - local gradInput = mm:backward({A, B}, gradOutput) - mytester:assert(#gradInput == 2, 'gradInput must be table of size 2') - local gradA, gradB = table.unpack(gradInput) - mytester:assertTableEq(gradA:size():totable(), A:size():totable(), - 'Gradient for input A has wrong size') - mytester:assertTableEq(gradB:size():totable(), B:size():totable(), - 'Gradient for input B has wrong size') - for i = 1, bSize do - mytester:assertTensorEq(gradA[i], B[i]:t() * gradOutput[i]:t(), 1e-10, - 'Gradient for input A wrong for bSize = ' .. bSize .. ' and i = ' .. i) - mytester:assertTensorEq(gradB[i], gradOutput[i]:t() * A[i]:t(), 1e-10, - 'Gradient for input B wrong for bSize = ' .. bSize .. ' and i = ' .. i) - end - end -end - -function nntest.DotProduct() - local indim = math.random(1,10) - - -- test 1D forward - local input = {torch.rand(indim),torch.rand(indim)} - local module = nn.DotProduct() - local expected = input[1]:dot(input[2]) - local output = module:forward(input) - mytester:assertlt(math.abs(expected-output[1]), precision, 'error on forward ') - - -- check gradients - -- Note: testJacobian doesn't support table inputs, and rather than re-write - -- it so that it does, I'll just use a split table module on the input. - -- I assume both SplitTable and Sequential do not have bugs, otherwise this - -- test will break. - local input = torch.rand(2,indim) - local module = nn.Sequential() - module:add(nn.SplitTable(1)) - module:add(nn.DotProduct()) - - local err = jac.testJacobian(module,input) - mytester:assertlt(err,precision, 'error on state ') - - -- IO - local ferr,berr = jac.testIO(module,input) - mytester:eq(ferr, 0, torch.typename(module) .. ' - i/o forward err ', precision) - mytester:eq(berr, 0, torch.typename(module) .. 
' - i/o backward err ', precision) - - -- batch - -- rebuild module to avoid correlated tests - local module = nn.Sequential() - module:add(nn.SplitTable(1)) - module:add(nn.DotProduct()) - - local nframes = math.random(1,10) - local indim = math.random(1,10) - local input = torch.rand(2,nframes,indim) - - local err = jac.testJacobian(module,input) - mytester:assertlt(err,precision, 'batch error on state ') -end - -function nntest.CosineDistance() - local indim = math.random(1,10) - local input = {torch.rand(indim),torch.rand(indim)} - - -- check forward against previous implementation - local module = nn.CosineDistance() - - local w1 = input[1]:dot(input[2]) - local w2 = math.sqrt(input[1]:dot(input[1])) - local w3 = math.sqrt(input[2]:dot(input[2])) - local output_old = w1/w2/w3 - - local output = module:forward(input) - - mytester:assertlt(math.abs(output_old-output[1]),precision,'error on forward ') - - - -- check gradients - -- Note: testJacobian doesn't support table inputs, and rather than re-write - -- it so that it does, I'll just use a split table module on the input. - -- I assume both SplitTable and Sequential do not have bugs, otherwise this - -- test will break. - local input = torch.rand(2,indim) - local module = nn.Sequential() - module:add(nn.SplitTable(1)) - module:add(nn.CosineDistance()) - - local err = jac.testJacobian(module,input) - mytester:assertlt(err,precision, 'error on state ') - - -- IO - local ferr,berr = jac.testIO(module,input) - mytester:eq(ferr, 0, torch.typename(module) .. ' - i/o forward err ', precision) - mytester:eq(berr, 0, torch.typename(module) .. ' - i/o backward err ', precision) - - -- batch - -- rebuild module to avoid correlated tests - local module = nn.Sequential() - module:add(nn.SplitTable(1)) - module:add(nn.CosineDistance()) - - local nframes = math.random(1,10) - local indim = math.random(1,10) - local input = torch.rand(2,nframes,indim) - - local err = jac.testJacobian(module,input) - mytester:assertlt(err,precision, 'batch error on state ') - -end - -function nntest.CosineEmbeddingCriterion() - local v1 = torch.Tensor{1, 0} - local v2 = torch.Tensor{0.5, math.sqrt(3)*0.5} - - local crit = nn.CosineEmbeddingCriterion(0.6) - local output = crit:forward({v1, v2}, -1) -- must be called before backward - local grads = crit:backward({v1, v2}, -1) - - local zero = torch.Tensor(2):zero() - equal(grads[1], zero, 'gradient should be zero') - equal(grads[2], zero, 'gradient should be zero') - - -- check jacobians - local margin = math.random()*2-1 - local dim = 5 - local batch_size = 1 - local crit = nn.CosineEmbeddingCriterion(margin) - local v = torch.rand(2,dim) - criterionJacobianTest1DTable(crit,v,1) - criterionJacobianTest1DTable(crit,v,-1) - - -- batch with hand-computed values - local v1 = torch.Tensor{{1, 0}, {0.5, math.sqrt(3)*0.5}} - local v2 = torch.Tensor{{0.5, math.sqrt(3)*0.5}, {1, 0}} - - local t = torch.Tensor{-1,-1} - local crit = nn.CosineEmbeddingCriterion(0.6) - local output = crit:forward({v1, v2}, t) -- must be called before backward - local grads = crit:backward({v1, v2}, t) - - local zero = torch.Tensor(2,2):zero() - equal(grads[1], zero, 'gradient should be zero') - equal(grads[2], zero, 'gradient should be zero') - - -- batch, sizeAverage true, jacobian - local margin = math.random()*2-1 - local dim = 5 - local batch_size = 2 - local crit = nn.CosineEmbeddingCriterion(margin) - crit.sizeAverage = true - local v = torch.rand(2,batch_size,dim) - local t = torch.Tensor(batch_size):random(0,1):mul(2):add(-1) - 
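- -- (editor note) random(0,1):mul(2):add(-1) draws each target uniformly from - -- {-1, 1}, the two label values CosineEmbeddingCriterion accepts.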
criterionJacobianTest1DTable(crit,v,t) - - -- batch, sizeAverage false, jacobian - local margin = math.random()*2-1 - local crit = nn.CosineEmbeddingCriterion(margin) - crit.sizeAverage = false - local v = torch.rand(2,batch_size,dim) - local t = torch.Tensor(batch_size):random(0,1):mul(2):add(-1) - criterionJacobianTest1DTable(crit,v,t) -end - -function nntest.HingeEmbeddingCriterion() - local x = torch.Tensor{0.3,2.1,1.8,0} - local y = torch.Tensor{1,-1,-1,1} - local expgrads = torch.Tensor{1,0,-1,1} / 4 - - local crit = nn.HingeEmbeddingCriterion(2) - local output = crit:forward(x, y) -- must be called before backward - local grads = crit:backward(x, y) - - mytester:assert(math.abs(output - (0.3 + 0.2) / 4) < 1e-10) - equal(grads, expgrads) -end - -function nntest.Replicate() - local vector = torch.rand(3) - - local r1 = nn.Replicate(2, 1) - local r2 = nn.Replicate(2, 2) - - local vOutput1 = r1:forward(vector):clone() - local vOutput2 = r2:forward(vector):clone() - - local expected1 = torch.zeros(2, 3) - local expected2 = torch.zeros(3, 2) - expected1:select(1, 1):copy(vector) - expected1:select(1, 2):copy(vector) - expected2:select(2, 1):copy(vector) - expected2:select(2, 2):copy(vector) - - mytester:assertTensorEq(vOutput1, expected1, precision, 'Wrong tiling of data when replicating vector.') - mytester:assertTensorEq(vOutput2, expected2, precision, 'Wrong tiling of data when replicating vector.') - - -- batch mode - local vector = torch.rand(4,3) - - local r1 = nn.Replicate(2, 1, 1) - local r2 = nn.Replicate(2, 2, 1) - - local vOutput1 = r1:forward(vector):clone() - local vOutput2 = r2:forward(vector):clone() - - local expected1 = torch.zeros(4, 2, 3) - local expected2 = torch.zeros(4, 3, 2) - expected1:select(2, 1):copy(vector) - expected1:select(2, 2):copy(vector) - expected2:select(3, 1):copy(vector) - expected2:select(3, 2):copy(vector) - - mytester:assertTensorEq(vOutput1, expected1, precision, 'Wrong tiling of data when replicating batch vector.') - mytester:assertTensorEq(vOutput2, expected2, precision, 'Wrong tiling of data when replicating batch vector.') -end - -local function testBatchNormalization(moduleName, dim, k) - local planes = torch.random(1,k) - local size = { torch.random(2, k), planes } - for i=1,dim do - table.insert(size, torch.random(1,k)) - end - local input = torch.zeros(table.unpack(size)):uniform() - - local function jacTests(module, input, affine) - local err = jac.testJacobian(module,input) - mytester:assertlt(err,precision, 'error on state ') - - if affine then - local err = jac.testJacobianParameters(module, input, - module.weight, module.gradWeight) - mytester:assertlt(err,precision, 'error on weight ') - - local err = jac.testJacobianParameters(module, input, - module.bias, module.gradBias) - mytester:assertlt(err,precision, 'error on bias ') - - local err = jac.testJacobianUpdateParameters(module, input, module.weight) - mytester:assertlt(err,precision, 'error on weight [direct update] ') - - local err = jac.testJacobianUpdateParameters(module, input, module.bias) - mytester:assertlt(err,precision, 'error on bias [direct update] ') - - for t,err in pairs(jac.testAllUpdate(module, input, 'weight', 'gradWeight')) do - mytester:assertlt(err, precision, string.format( - 'error on weight [%s]', t)) - end - - for t,err in pairs(jac.testAllUpdate(module, input, 'bias', 'gradBias')) do - mytester:assertlt(err, precision, string.format('error on bias [%s]', t)) - end - end - - -- IO - local ferr,berr = jac.testIO(module,input) - mytester:eq(ferr, 0, 
torch.typename(module) .. ' - i/o forward err ', precision) - mytester:eq(berr, 0, torch.typename(module) .. ' - i/o backward err ', precision) - end - - local module = nn[moduleName](planes) - module:training() - jacTests(module, input, true) - module:evaluate() - jacTests(module, input, true) - jacTests(module, input[1], true) - - -- batch norm without affine transform - module = nn[moduleName](planes, 1e-5, 0.1, false) - module:training() - jacTests(module, input, false) - module:evaluate() - jacTests(module, input, false) - jacTests(module, input[1], false) -end - -function nntest.BatchNormalization() - testBatchNormalization('BatchNormalization', 0, 20) -end - -function nntest.SpatialBatchNormalization() - testBatchNormalization('SpatialBatchNormalization', 2, 6) -end - -function nntest.VolumetricBatchNormalization() - testBatchNormalization('VolumetricBatchNormalization', 3, 4) -end - -function nntest.GradientReversal() - local ini = math.random(3,5) - local inj = math.random(3,5) - local ink = math.random(3,5) - local input = torch.Tensor(ini,inj,ink):zero() - -- Two GradientReversal layers should cancel each other out - local module = nn.Sequential() - module:add(nn.GradientReversal()) - module:add(nn.GradientReversal()) - - local err = jac.testJacobian(module,input, 0.1, 10) - mytester:assertlt(err,precision, 'error on state ') - - local ferr,berr = jac.testIO(module,input, 0.1, 10) - mytester:eq(ferr, 0, torch.typename(module) .. ' - i/o forward err ', precision) - mytester:eq(berr, 0, torch.typename(module) .. ' - i/o backward err ', precision) -end - -function nntest.Padding() - local fanin = math.random(1,3) - local sizex = math.random(4,16) - local sizey = math.random(4,16) - local pad = math.random(-3,3) - local index = math.random(1, fanin) - local val = torch.randn(1):squeeze() - local module = nn.Padding(1, pad, 3, val, index) - local input = torch.rand(fanin,sizey,sizex) - local size = input:size():totable() - size[1] = size[1] + math.abs(pad) - - local output = module:forward(input) - mytester:assertTableEq(size, output:size():totable(), 0.00001, "Padding size error") - - local gradInput = module:backward(input, output) - mytester:assertTensorEq(gradInput, input, 0.00001, "Padding backward error") -end - -function nntest.addSingletonDimension() - local dims = torch.random(5) - local size = torch.LongTensor(dims):random(10) - local perm = torch.randperm(dims):totable() - local tensor = torch.Tensor(table.unpack(size:totable())):uniform():permute(table.unpack(perm)) - size = torch.gather(size, 1, torch.LongTensor(perm)) - - local firstDim = nn.utils.addSingletonDimension(tensor) - mytester:assertTableEq(firstDim:size():totable(), {1, table.unpack(size:totable())}, - "wrong size for singleton dimension 1") - mytester:assertTensorEq(firstDim[1], tensor, 0, - "wrong content for singleton dimension 1") - - local dim = torch.random(dims + 1) - local result = nn.utils.addSingletonDimension(tensor, dim) - local resultSize = size:totable() - table.insert(resultSize, dim, 1) - mytester:assertTableEq(result:size():totable(), resultSize, - "wrong size for random singleton dimension") - mytester:assertTensorEq(result:select(dim, 1), tensor, 0, - "wrong content for random singleton dimension") - - mytester:assertError(function() nn.utils.addSingletonDimension(tensor, dims + 2) end, - "invalid dimension not detected") - - -- passing output tensor as argument - local resultArg = torch.Tensor() - local resultR = nn.utils.addSingletonDimension(resultArg, tensor, dim) - 
mytester:eq(resultArg:size():totable(), resultSize, - 'wrong size for random singleton dimension '.. - 'when the result is passed as argument') - mytester:eq(resultArg, result, 'wrong content for random singleton dimension '.. - 'when the result is passed as argument') - - mytester:eq(resultR == resultArg, true, - 'new tensor is created when it should use the provided tensor') -end - -function nntest.SpatialReflectionPadding() - local batch = math.random(1,3) - local plane = math.random(1,3) - local sizeY = math.random(7,16) - local sizeX = math.random(7,16) - local padL = math.random(-3,3) - local padR = math.random(-3,3) - local padT = math.random(-3,3) - local padB = math.random(-3,3) - local jac = nn.Jacobian - local layer = nn.SpatialReflectionPadding(padL, padR, padT, padB) - local input = torch.rand(batch, plane, sizeY, sizeX) - local err = jac.testJacobian(layer, input) - mytester:assertalmosteq(err, 0.0, 1e-7) -end - -function nntest.SpatialReplicationPadding() - local batch = math.random(1,3) - local plane = math.random(1,3) - local sizeY = math.random(7,16) - local sizeX = math.random(7,16) - local padL = math.random(-3,3) - local padR = math.random(-3,3) - local padT = math.random(-3,3) - local padB = math.random(-3,3) - local jac = nn.Jacobian - local layer = nn.SpatialReplicationPadding(padL, padR, padT, padB) - local input = torch.rand(batch, plane, sizeY, sizeX) - local err = jac.testJacobian(layer, input) - mytester:assertalmosteq(err, 0.0, 1e-7) -end - -function nntest.VolumetricReplicationPadding() - for batch = 0, 1 do - local nbatch - if batch == 1 then - nbatch = math.random(1,3) - end - local plane = math.random(1,3) - local sizeZ = math.random(1,4) - local sizeY = math.random(7,11) - local sizeX = math.random(7,11) - local padLeft = math.random(-3,3) - local padRight = math.random(-3,3) - local padTop = math.random(-3,3) - local padBottom = math.random(-3,3) - local padFront = math.random(-3,3) - local padBack = math.random(-3,3) - local jac = nn.Jacobian - local layer = - nn.VolumetricReplicationPadding(padLeft, padRight, padTop, - padBottom, padFront, padBack) - local input - if batch == 1 then - input = torch.rand(nbatch, plane, sizeZ, sizeY, sizeX) - else - input = torch.rand(plane, sizeZ, sizeY, sizeX) - end - local err = jac.testJacobian(layer, input) - mytester:assertalmosteq(err, 0.0, 1e-7) - end -end - -function nntest.PixelShuffle() - -- Checks whether a given tensor has the specified size - local function tensorHasSize(tensor, size) - local tensorSize = tensor:size() - - if tensorSize:size() ~= #size then - return false - end - for i,v in ipairs(size) do - if tensorSize[i] ~= size[i] then - return false - end - end - return true - end - - --Verifies that the output is the input re-shuffled as per Eq 4. in - -- "Real-Time Single Image and Video Super-Resolution Using an Efficient - -- Sub-Pixel Convolutional Neural Network", Shi et al. - -- @param - the input, low-resolution image of shape [1, c, h, w] - -- @param - the output, super resolved image of shape [1, c, h, w] - -- @param - upscale factor of the super resolution - -- @returns true if output complies with Eq 4. 
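- -- (editor sketch, not part of the original suite) A worked instance of the - -- Eq. 4 mapping for upscaleFactor = 2: four 1x1 input channels {1,2,3,4} - -- should shuffle into a single 2x2 output plane [[1,2],[3,4]], assuming the - -- channelIdx formula used by verifyPixelShuffle below. - local psSketch = nn.PixelShuffle(2):forward(torch.Tensor{1, 2, 3, 4}:view(1, 4, 1, 1)) - assert(psSketch:size(2) == 1 and psSketch:size(3) == 2 and psSketch:size(4) == 2)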
- local function verifyPixelShuffle(_input, _output, upscaleFactor) - local input = _input - local output = _output - - if input:nDimension() == 3 then - input = input:view(1, input:size(1), input:size(2), input:size(3)) - output = output:view(1, output:size(1), output:size(2), output:size(3)) - end - - for c = 1, output:size(2) do - for h = 1, output:size(3) do - for w = 1, output:size(4) do - local heightIdx = torch.floor((h - 1)/upscaleFactor) + 1 - local widthIdx = torch.floor((w - 1)/upscaleFactor) + 1 - --c does not need to be (c - 1) as it starts at 1 not zero - local channelIdx = upscaleFactor * ((h-1) % upscaleFactor) + ((w-1) % upscaleFactor) + 1 + (c-1)*upscaleFactor*upscaleFactor - - mytester:assertTensorEq(output[{{}, {c}, {h}, {w}}], input[{{}, {channelIdx}, {heightIdx}, {widthIdx}}], - string.format("output at location (%d, %d, %d) is incorrect", c, h, w)) - end - end - end - return true - end - - -- Checks the nn.PixelShuffle layer's forward pass. It checks that it - -- re-arranges input pixels correctly according to Eq. 4 of - -- "Real-Time Single Image and Video Super-Resolution Using an Efficient - -- Sub-Pixel Convolutional Neural Network", Shi et al. - -- This function tests for multiple batch sizes, multiple channels and multiple input dimensions (square) - -- It also tests for normal tensors (un-batched) - local function testPixelShuffleUpdateOutput() - --Test with batched input - for h = 1, 3 do - local batchSize = torch.round(torch.uniform(1, 3)) - for i = 1, 3 do - local upscaleFactor = torch.round(torch.uniform(2,5)) - local pixelShuffle = nn.PixelShuffle(upscaleFactor) - for j = 1, 3 do - local channels = torch.round(torch.uniform(1, 4)) - for k = 1, 3 do - - local inputDim = torch.round(torch.uniform(5, 10)) - local input = torch.Tensor(batchSize, channels * upscaleFactor * upscaleFactor, inputDim, inputDim) - input:uniform() - - local output = pixelShuffle:forward(input) - local expectedOutputDim = inputDim * upscaleFactor - mytester:assert(tensorHasSize(output, {batchSize, channels, expectedOutputDim, expectedOutputDim}), - string.format("Output tensor should have size (%d, %d, %d, %d) not %s", batchSize, channels, expectedOutputDim, expectedOutputDim, tostring(output:size()))) - verifyPixelShuffle(input, output, upscaleFactor) - end - end - end - end - - --Test with non-batched input - local inputDim = torch.round(torch.uniform(5, 10)) - local channels = torch.round(torch.uniform(1, 4)) - local upscaleFactor = torch.round(torch.uniform(2,5)) - - local input = torch.Tensor(channels * upscaleFactor * upscaleFactor, inputDim, inputDim) - input:uniform() - - local pixelShuffle = nn.PixelShuffle(upscaleFactor) - local output = pixelShuffle:forward(input) - local expectedOutputDim = inputDim * upscaleFactor - mytester:assert(tensorHasSize(output, {channels, expectedOutputDim, expectedOutputDim}), - string.format("Output tensor should have size (%d, %d, %d) not %s", channels, expectedOutputDim, expectedOutputDim, tostring(output:size()))) - - verifyPixelShuffle(input, output, upscaleFactor) - end - - -- Checks the nn.PixelShuffle layer's backward pass. It checks that it - -- essentially performs the inverse of Eq 4. in - -- "Real-Time Single Image and Video Super-Resolution Using an Efficient - -- Sub-Pixel Convolutional Neural Network", Shi et al. 
- -- This function tests for multiple batch sizes, multiple channels and multiple input dimensions (square) - -- It also tests for normal tensors (un-batched) - local function testPixelShuffleUpdateGradInput() - --Test with batched input - for h = 1, 3 do - local batchSize = torch.round(torch.uniform(1, 3)) - for i = 1, 3 do - local upscaleFactor = torch.round(torch.uniform(2,5)) - local pixelShuffle = nn.PixelShuffle(upscaleFactor) - for j = 1, 3 do - local channels = torch.round(torch.uniform(1, 4)) - for k = 1, 3 do - local inputDim = torch.round(torch.uniform(5, 10)) - local input = torch.Tensor(batchSize, channels * upscaleFactor * upscaleFactor, inputDim, inputDim) - - input:uniform() - - local output = pixelShuffle:forward(input) - --here we treat output as the same as gradOutput as they have the same shape - local reconstructedInput = pixelShuffle:backward(input, output) - mytester:assertTensorEq(reconstructedInput, input, 0) - end - end - end - end - - --Test with non-batched input - local inputDim = torch.round(torch.uniform(5, 10)) - local channels = torch.round(torch.uniform(1, 4)) - local upscaleFactor = torch.round(torch.uniform(2,5)) - local input = torch.Tensor(channels * upscaleFactor * upscaleFactor, inputDim, inputDim) - input:uniform() - - local pixelShuffle = nn.PixelShuffle(upscaleFactor) - local output = pixelShuffle:forward(input) - --here we treat output as the same as gradOutput as they have the same shape - local reconstructedInput = pixelShuffle:backward(input, output) - mytester:assertTensorEq(reconstructedInput, input, 0) - - local err = jac.testJacobian(pixelShuffle, input) - mytester:assertlt(err,precision, "error computing gradients w.r.t. inputs") - end - - local function testModuleIO() - --Test with non-batched input - local inputDim = torch.round(torch.uniform(5, 10)) - local channels = torch.round(torch.uniform(1, 4)) - local upscaleFactor = torch.round(torch.uniform(2,5)) - local input = torch.Tensor(channels * upscaleFactor * upscaleFactor, inputDim, inputDim):uniform() - local pixelShuffle = nn.PixelShuffle(upscaleFactor) - - local fwdErr,bkwdErr = jac.testIO(pixelShuffle,input) - mytester:asserteq(fwdErr, 0, torch.typename(pixelShuffle) .. " - i/o forward err ") - mytester:asserteq(bkwdErr, 0, torch.typename(pixelShuffle) .. 
" - i/o backward err ") - end - - testPixelShuffleUpdateOutput() - testPixelShuffleUpdateGradInput() - testModuleIO() -end - -function nntest.Typecast() - local function make_network() - local seq = nn.Sequential() - seq:add(nn.Linear(15, 10)) - seq:add(nn.Linear(15, 10)) - seq.modules[1].bias:fill(1) - seq.modules[2].bias:fill(2) - return seq - end - - -- make sure that the typecasts aren't nops - assert(torch.getdefaulttensortype() == 'torch.DoubleTensor') - - -- basic net - local net = make_network() - net.modules[1].empty_tensor = torch.Tensor() - net:float() - assert(net.modules[1].bias:type() == 'torch.FloatTensor', - net.modules[1].bias:type()) - assert(net.modules[1].empty_tensor:type() == 'torch.FloatTensor') - assert(net.modules[1].bias ~= net.modules[2].bias) - net.modules[1].bias:fill(3) - assert(net.modules[1].bias[1] == 3) - assert(net.modules[2].bias[1] == 2) - - -- shared tensors remain shared - local net = make_network() - net.modules[2].bias = net.modules[1].bias - net:float() - assert(net.modules[1].bias:type() == 'torch.FloatTensor') - assert(net.modules[1].bias == net.modules[2].bias) - assert(net.modules[1].bias[1] == 1) - - -- shared storages remain shared - local net = make_network() - net.modules[2].bias:set(net.modules[1].bias) - local net = net:float() - assert(net.modules[1].bias:type() == 'torch.FloatTensor') - assert(net.modules[1].bias ~= net.modules[2].bias) - net.modules[1].bias:fill(3) - assert(net.modules[1].bias[1] == 3) - assert(net.modules[2].bias[1] == 3) - - -- tricky: overlapping views on the same storage are preserved - local net = make_network() - local overlap_storage = torch.Tensor(15):fill(1) - net.modules[1].bias = overlap_storage:narrow(1, 1, 10) - net.modules[2].bias = overlap_storage:narrow(1, 6, 10) - net:float() - assert(net.modules[1].bias:type() == 'torch.FloatTensor') - assert(net.modules[1].bias ~= net.modules[2].bias) - net.modules[1].bias:fill(3) - assert(net.modules[1].bias[1] == 3) - assert(net.modules[2].bias[1] == 3) - assert(net.modules[2].bias[6] == 1) -- only the first 5 elements overlapped - - -- check recursiveType on a table - local net1 = make_network() - local net2 = make_network() - net2.modules[1].bias:set(net1.modules[1].bias) - net1:float() - net2:float() - net1.modules[1].bias:fill(3) - assert(net2.modules[1].bias[1] == 1) - - local net1 = make_network() - local net2 = make_network() - net2.modules[1].bias:set(net1.modules[1].bias) - - local tensorCache = {} - net1:type('torch.FloatTensor', tensorCache) - net2:type('torch.FloatTensor', tensorCache) - net1.modules[1].bias:fill(3) - assert(net2.modules[1].bias[1] == 3) - - local net1 = make_network() - local net2 = make_network() - net2.modules[1].bias:set(net1.modules[1].bias) - - nn.utils.recursiveType({net1, net2}, 'torch.FloatTensor') - net1.modules[1].bias:fill(3) - assert(net2.modules[1].bias[1] == 3) - - -- smoke test some modules with custom type methods - local custom_type_modules = { - nn.MixtureTable(3), - nn.ConcatTable(), - nn.Copy(), - nn.Copy(nil, nil, nil, true), - nn.SpatialContrastiveNormalization(), - nn.DotProduct(), - nn.PairwiseDistance(1), - nn.SpatialDivisiveNormalization(), - nn.SpatialSubtractiveNormalization() - } - for _, module in ipairs(custom_type_modules) do - module:float() - end -end - -function nntest.Module_apply() - local s = nn.Sequential() - s:add(nn.Linear(10,10)) - local s2 = nn.Sequential() - s2:add(nn.Linear(10,5)) - s:add(s2) - s:add(nn.Tanh()) - - local seen = 0 - s:apply(function(module) - if torch.type(module) == 
'nn.Linear' then - module.bias:resize(20) - seen = seen + 1 - end - end) - mytester:asserteq(seen, 2) - mytester:asserteq(s.modules[1].bias:size(1), 20) - mytester:asserteq(s2.modules[1].bias:size(1), 20) -end - -function nntest.Module_replace() - -- test replace in container - local s = nn.Sequential() - s:add(nn.Linear(10,10)) - s:add(nn.Sigmoid()) - s:replace(function(module) - return torch.type(module) == 'nn.Sigmoid' and nn.Tanh() or module - end) - -- test replace of a single module - local single = nn.Tanh() - local replaced = single:replace(function(module) - return torch.type(module) == 'nn.Tanh' and nn.Sigmoid() or module - end) - mytester:asserteq(torch.type(s:get(2)), 'nn.Tanh', 'replace in container') - mytester:asserteq(torch.type(replaced), 'nn.Sigmoid', 'replace in single module') -end - -function nntest.Cosine() - local inputSize = 4 - local outputSize = 5 - - -- test 1D - local input = torch.randn(inputSize) - local gradOutput = torch.randn(outputSize) - local cosine = nn.Cosine(inputSize,outputSize) - local output = cosine:forward(input) - local inputNorm = input:norm()+1e-12 - local weight2 = cosine.weight[2] - local output2 = torch.dot(weight2, input)/((weight2:norm()+1e-12)*inputNorm) - mytester:assert(math.abs(output2 - output[2]) < 0.000001,"Cosine output 1D err weight[2]") - local output2 = torch.mv(cosine.weight, input) - output2:cdiv(cosine.weight:norm(2,2)+1e-12):div(inputNorm) - mytester:assertTensorEq(output, output2, 0.000001, "Cosine output 1D err") - local gradInput = cosine:updateGradInput(input, gradOutput) - local gradInput2 = gradInput:clone():zero() - for j=1,outputSize do - local w_j = cosine.weight[j] - local nw_j = w_j:norm()+1e-12 - for i=1,inputSize do - local w_ij = w_j[i] - local grad_i = (w_ij/(inputNorm*nw_j)) - grad_i = grad_i - (output[j]*input[i]/(inputNorm*inputNorm)) - grad_i = grad_i * gradOutput[j] - gradInput2[i] = gradInput2[i] + grad_i - end - end - mytester:assertTensorEq(gradInput2, gradInput, 0.000001, "Cosine gradInput 1D err") - cosine:zeroGradParameters() - cosine:accGradParameters(input, gradOutput, 1) - local gradWeight2 = cosine.weight:clone():zero() - for j=1,outputSize do - local w_j = cosine.weight[j] - local nw_j = w_j:norm()+1e-12 - for i=1,inputSize do - local w_ij = w_j[i] - local gW_ij = (gradOutput[j]/nw_j) * ( ( input[i] / inputNorm ) - (output[j] * w_ij / nw_j) ) - gradWeight2[{j,i}] = gW_ij - end - end - mytester:assertTensorEq(cosine.gradWeight, gradWeight2, 0.000001, "Cosine gradWeight 1D err") - - -- test 2D - local batchSize = 3 - local input = torch.randn(batchSize, inputSize) - local gradOutput = torch.randn(batchSize, outputSize) - cosine:zeroGradParameters() - local cosine2 = cosine:clone() - local output = cosine:forward(input) - local output2 = cosine2:forward(input[2]) - mytester:assertTensorEq(output[2], output2, 0.000001, "Cosine output 2D err") - local gradInput = cosine:backward(input, gradOutput) - - local gradInput2 = gradInput:clone():zero() - for i=1,batchSize do - cosine2:forward(input[i]) - gradInput2[i]:copy(cosine2:backward(input[i], gradOutput[i])) - end - mytester:assertTensorEq(gradInput, gradInput2, 0.000001, "Cosine gradInput 2D err") - mytester:assertTensorEq(cosine.gradWeight, cosine2.gradWeight, 0.000001, "Cosine gradWeight 2D err") -end - -function nntest.DistanceRatioCriterion() - local sizeAverage = true - local crit = nn.DistanceRatioCriterion(sizeAverage) - local X = torch.rand(32,1):fill(1) - local Y = torch.rand(32,1):fill(1) - - -- Unit Test updateOutput - 
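- -- (editor note) The expected value below is consistent with a per-sample loss - -- of X + log(exp(-X) + exp(-Y)); with X = Y = 1 for every pair this gives - -- 1 + log(exp(-1) + exp(-1)) = log(2), and sizeAverage divides the batch sum - -- by 32, which also makes dx:sum() = 0.5 and dy:sum() = -0.5 in the backward check.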
local loss = crit:forward({X, Y}) - local trueLoss = 1 + math.log(math.exp(-1) + math.exp(-1)) - assert(math.abs(loss - trueLoss) < 0.000001, - "DistanceRatioCriterion forward incorrect output") - - -- Unit Test updateGradInput - local dxdy = crit:backward({X, Y}) - local dx = dxdy[1] - local dy = dxdy[2] - assert(math.abs(dx:sum() - 0.5) < 0.000001, - "DistanceRatioCriterion backward (dx) incorrect output") - assert(math.abs(dy:sum() + 0.5) < 0.000001, - "DistanceRatioCriterion backward (dy) incorrect output") -end - -function nntest.ErrorHandling() - local l = nn.Linear(1, 1) - local p = nn.Parallel(1, 1):add(l) - local c = nn.Concat(1):add(p) - local model = nn.Sequential():add(nn.Identity()):add(c):add(nn.Identity()) - local function errmsg(module, i) - return 'In ' .. i .. ' module of ' .. torch.type(module) .. ':\n' - end - local expected_err = errmsg(model, 2) .. errmsg(c, 1) .. errmsg(p, 1) - mytester:assertErrorObj( - function() - model:forward(torch.randn(1,2,2)) - end, - function(err) - return err:find(expected_err) and err:find('size mismatch') - end, - "Failure expected or bad error message (missing information or reason)" - ) -end - -function nntest.GPU() - -- this is a placeholder to let you know that the nn.GPU unit test - -- is located in cunn package. -end - -function nntest.Profile() - local mx_overhead = 0.05 - local print_every = 3 - local net = nn.Profile(nn.Linear(3,4), print_every) - local input, gradOutput = torch.randn(1, 3), torch.randn(1, 4) - local output, gradInput = net:forward(input), net:backward(input, gradOutput) - mytester:assertTensorEq(net.modules[1].output, output, 0.000001) - mytester:assertTensorEq(net.modules[1].gradInput, gradInput, 0.000001) -end - -function nntest.NaN() - local _ = require 'moses' - local input = torch.randn(2,3) - local gradOutput = torch.randn(2,4) - local lin = nn.Linear(3,4) - lin:zeroGradParameters() - local nan = nn.NaN(lin) - mytester:assert(nan.id == 1) - -- test that it works when no NaNs are present - local output = nan:forward(input):clone() - local gradInput = nan:backward(input, gradOutput):clone() - local gradWeight = lin.gradWeight:clone() - local gradBias = lin.gradBias:clone() - lin:zeroGradParameters() - local output2 = lin:forward(input) - local gradInput2 = lin:backward(input, gradOutput) - mytester:assertTensorEq(output, output2, 0.000001) - mytester:assertTensorEq(gradInput, gradInput2, 0.000001) - mytester:assertTensorEq(gradWeight, lin.gradWeight, 0.000001) - mytester:assertTensorEq(gradBias, lin.gradBias, 0.000001) - -- test with some NaNs - input:zero():log():log() - local sum = input:sum() - mytester:assert(_.isNaN(sum)) - mytester:assert(not pcall(function() nan:forward(input) end)) - lin.bias:fill(sum) - input = torch.randn(2,3) - mytester:assert(not pcall(function() nan:forward(input) end)) - lin.bias:uniform(0,1) - gradOutput:fill(sum) - mytester:assert(not pcall(function() nan:backward(input, gradOutput) end)) - gradOutput:uniform(0,1) - lin.gradBias:fill(sum) - mytester:assert(not pcall(function() nan:backward(input, gradOutput) end)) -end - -function nntest.DontCast() - local input = torch.randn(3,4) - local gradOutput = torch.randn(3,2) - local linear = nn.Linear(4,2):float() - local mlp = nn.DontCast(linear, true) - linear:zeroGradParameters() - local linear = linear:clone() - local output = mlp:forward(input) - local gradInput = mlp:backward(input, gradOutput) - mytester:assert(torch.type(output) == 'torch.DoubleTensor') - mytester:assert(torch.type(gradInput) == 'torch.DoubleTensor') - local 
output2 = linear:forward(input:float()) - local gradInput2 = linear:backward(input:float(), gradOutput:float()) - mytester:assertTensorEq(output:float(), output2, 0.000001) - mytester:assertTensorEq(gradInput:float(), gradInput2, 0.000001) - local mlp3 = nn.DontCast(linear:clone()) - mlp3:zeroGradParameters() - local output3 = mlp3:forward(input:float()) - local gradInput3 = mlp3:backward(input:float(), gradOutput:float()) - mytester:assert(torch.type(output3) == 'torch.FloatTensor') - mytester:assert(torch.type(gradInput3) == 'torch.FloatTensor') - mytester:assertTensorEq(output3, output2, 0.000001) - mytester:assertTensorEq(gradInput3, gradInput2, 0.000001) - - mlp:float() - local output4 = mlp:forward(input:float()) - local gradInput4 = mlp:backward(input:float(), gradOutput:float()) - mytester:assert(torch.type(output4) == 'torch.FloatTensor') - mytester:assert(torch.type(gradInput4) == 'torch.FloatTensor') - mytester:assertTensorEq(output3, output4, 0.000001) - mytester:assertTensorEq(gradInput3, gradInput4, 0.000001) - mlp:double() - mytester:assert(torch.type(linear.output) == 'torch.FloatTensor') - local output = mlp:forward(input) - local gradInput = mlp:backward(input, gradOutput) - mytester:assert(torch.type(output4) == 'torch.FloatTensor') - mytester:assert(torch.type(gradInput4) == 'torch.FloatTensor') - mytester:assertTensorEq(output3, output:float(), 0.000001) - mytester:assertTensorEq(gradInput3, gradInput:float(), 0.000001) - - -- test table inputs/outputs - local input = {torch.randn(3,4), torch.randn(3,4)} - local gradOutput = {torch.randn(3,2), torch.randn(3,2)} - local linear = nn.ParallelTable():add(nn.Linear(4,2)):add(nn.Linear(4,2)):float() - local mlp = nn.DontCast(linear, true) - linear:zeroGradParameters() - local linear = linear:clone() - local output = mlp:forward(input) - local gradInput = mlp:backward(input, gradOutput) - mytester:assert(torch.type(output[1]) == 'torch.DoubleTensor') - mytester:assert(torch.type(gradInput[1]) == 'torch.DoubleTensor') - mytester:assert(torch.type(output[2]) == 'torch.DoubleTensor') - mytester:assert(torch.type(gradInput[2]) == 'torch.DoubleTensor') - local _ = require 'moses' - local finput = _.map(input, function(k,v) return v:float() end) - local foutput = _.map(output, function(k,v) return v:float() end) - local fgradInput = _.map(gradInput, function(k,v) return v:float() end) - local fgradOutput = _.map(gradOutput, function(k,v) return v:float() end) - local output2 = linear:forward(finput) - local gradInput2 = linear:backward(finput, fgradOutput) - mytester:assertTensorEq(foutput[1], output2[1], 0.000001) - mytester:assertTensorEq(foutput[2], output2[2], 0.000001) - mytester:assertTensorEq(fgradInput[1], gradInput2[1], 0.000001) - mytester:assertTensorEq(fgradInput[2], gradInput2[2], 0.000001) - local mlp3 = nn.DontCast(linear:clone()) - mlp3:zeroGradParameters() - local output3 = mlp3:forward(finput) - local gradInput3 = mlp3:backward(finput, fgradOutput) - mytester:assert(torch.type(output3[1]) == 'torch.FloatTensor') - mytester:assert(torch.type(gradInput3[1]) == 'torch.FloatTensor') - mytester:assert(torch.type(output3[2]) == 'torch.FloatTensor') - mytester:assert(torch.type(gradInput3[2]) == 'torch.FloatTensor') - mytester:assertTensorEq(output3[1], output2[1], 0.000001) - mytester:assertTensorEq(gradInput3[1], gradInput2[1], 0.000001) - mytester:assertTensorEq(output3[2], output2[2], 0.000001) - mytester:assertTensorEq(gradInput3[2], gradInput2[2], 0.000001) - mlp:float() - local output4 = mlp:forward(finput) - 
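- -- (editor note) The pattern exercised throughout nntest.DontCast: wrapping a - -- module with nn.DontCast(module, true) keeps the module in its own type and - -- converts tensors at the boundary, so a float Linear can consume double - -- input and return double output; without the second argument (mlp3) the - -- caller must already supply tensors of the wrapped type.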
local gradInput4 = mlp:backward(finput, fgradOutput) - mytester:assert(torch.type(output4[1]) == 'torch.FloatTensor') - mytester:assert(torch.type(gradInput4[1]) == 'torch.FloatTensor') - mytester:assert(torch.type(output4[2]) == 'torch.FloatTensor') - mytester:assert(torch.type(gradInput4[2]) == 'torch.FloatTensor') - mytester:assertTensorEq(output3[1], output4[1], 0.000001) - mytester:assertTensorEq(gradInput3[1], gradInput4[1], 0.000001) - mytester:assertTensorEq(output3[2], output4[2], 0.000001) - mytester:assertTensorEq(gradInput3[2], gradInput4[2], 0.000001) - mlp:double() - mytester:assert(torch.type(linear.output) == 'table') - mytester:assert(torch.type(linear.output[1]) == 'torch.FloatTensor') - mytester:assert(torch.type(linear.output[2]) == 'torch.FloatTensor') - local output = mlp:forward(input) - local gradInput = mlp:backward(input, gradOutput) - mytester:assertTensorEq(output3[1], output[1]:float(), 0.000001) - mytester:assertTensorEq(gradInput3[1], gradInput[1]:float(), 0.000001) -end - -function nntest.SpatialDepthWiseConvolution() - local epsilon = 0.00001 - - local SC = nn.SpatialConvolution - local SDWC = nn.SpatialDepthWiseConvolution - - local function spatialDepthWiseConv( - nInputPlane, multiplier, kernel, stride, padding, inputSize, weight, bias - ) - local conv = SDWC(nInputPlane, multiplier, kernel, kernel, stride, stride, padding, padding) - conv.weight = weight - conv.bias = bias - return conv - end - - -- Utility spatialDepthWiseConv_util() function -------------------------------- - -- By Alfredo Canziani, alfredo.canziani@gmail.com ----------------------------- - local function spatialDepthWiseConv_util( - nInputPlane, multiplier, kernel, stride, padding, inputSize, weight, bias - ) - - local conv = nn.Sequential() - conv:add(nn.Contiguous()) - conv:add(nn.View(-1, 1, inputSize, inputSize)) - conv:add(SC(1, multiplier, kernel, kernel, stride, stride, padding, padding)) - - local depthWiseConv = nn.Parallel(2, 2) - for channel = 1, nInputPlane do - local tempConv = conv:clone() - tempConv:get(3).weight = weight:narrow(2, channel, 1):clone() - tempConv:get(3).bias = bias:select(2, channel):clone() - depthWiseConv:add(tempConv) - end - depthWiseConv:add(nn.Contiguous()) - return depthWiseConv - end - - local n = 3 -- nInputPlane - local s = 28 -- input height and width - local b = 3 -- batch size - local m = 4 -- multiplier - local k = 3 -- kernel size - local p = 1 -- padding - local st = 1 -- stride - - local testBatch = 1e3 -- number of repetitions - - local X = torch.rand(b, n, s, s) -- 3x3x28x28 batch of images (b x n x s x s) - local weight = torch.rand(m, n, k, k) -- weight - local bias = torch.rand(m, n) -- bias - - local model = spatialDepthWiseConv(n, m, k, st, p, s, weight, bias) - local model_util = spatialDepthWiseConv_util(n, m, k, st, p, s, weight, bias) - - local Y_util = model_util:forward(X) - local Y = model:forward(X) - - local abs_diff = Y_util:clone():csub(Y):abs() - mytester:assert(torch.all(abs_diff:lt(epsilon))) -end - -function nntest.Constant() - local input = torch.randn(20,3,7) - local gradOutput = torch.randn(20,30,6) - local value = torch.randn(30,6) - local const = nn.Constant(value:clone(), 2) - local output = const:forward(input) - local gradInput = const:backward(input, output) - local output2 = value:view(1,30,6):expand(20,30,6) - mytester:assertTensorEq(output2, output, 0.000001, "Constant forward err") - mytester:assertTensorEq(gradInput, input:zero(), 0.000001, "Constant backward err") -end - -function nntest.WhiteNoise() - local input =
torch.zeros(3, 28, 28) - local addNoise = nn.WhiteNoise() - local output = addNoise:forward(input) - local meanValue = output:mean() - local stdValue = output:std() - mytester:assert(meanValue > -0.01 and meanValue < 0.01) - mytester:assert(stdValue < 0.15 and stdValue >= 0) - - -- Evaluate - addNoise:evaluate() - output = addNoise:forward(input) - meanValue = output:mean() - stdValue = output:std() - mytester:assert(meanValue == 0) - mytester:assert(stdValue == 0) - - -- backprop - addNoise:training() - local gradOutput = torch.rand(3, 28, 28) - local gradInput = addNoise:updateGradInput(input, gradOutput) - mytester:assertTensorEq(gradOutput, gradInput, 0.000001, "WhiteNoise backward err") -end - -function nntest.OneHot() - local nClass = 10 - - -- batch mode - local batchSize = 3 - local input = torch.LongTensor(batchSize):random(1, nClass) - local gradOutput = torch.randn(batchSize, nClass) - - local oh = nn.OneHot(nClass) - - local output = oh:forward(input) - local output2 = torch.Tensor(batchSize, nClass):zero() - local eye = torch.eye(nClass) - output2:index(eye, 1, input) - mytester:assertTensorEq(output, output2, 0.000001, "OneHot forward batch err") - mytester:assert(output:dim() == 2) - - -- non-batch mode (number input) - local num = 3 - local output3 = torch.zeros(nClass) - output3[num] = 1.0 - mytester:assertTensorEq(oh:forward(num), output3, 0.000001, "OneHot forward number err") - - local gradInput = oh:backward(input, gradOutput) - mytester:assertTensorEq(gradInput, input:double():zero(), 0.000001, "OneHot backward batch err") - - if pcall(function() require 'cunn' end) then - oh:cuda() - - -- test with long input - local output = oh:forward(input) - mytester:assert(torch.type(output) == 'torch.CudaTensor') - mytester:assertTensorEq(output:double(), output2, 0.000001, "OneHot forward batch long-cuda err") - - -- test with cuda input - local input = input:cuda() - gradOutput = gradOutput:cuda() - - local output = oh:forward(input) - mytester:assert(torch.type(output) == 'torch.CudaTensor') - mytester:assertTensorEq(output:double(), output2, 0.000001, "OneHot forward batch cuda err") - - local gradInput2 = oh:backward(input, gradOutput) - mytester:assertTensorEq(gradInput, gradInput2:double(), 0.000001, "OneHot backward batch err") - cutorch.synchronize() - - -- non-batch mode (number input) - mytester:assertTensorEq(oh:forward(num), output3:cuda(), 0.000001, "OneHot forward number err") - end - - -- multi-dimensional input - local inputSize = 2 - local input = torch.LongTensor(batchSize, inputSize):random(1, nClass) - local gradOutput = torch.randn(batchSize, inputSize, nClass) - - local oh = nn.OneHot(nClass, 2) - - local output = oh:forward(input) - local output2 = torch.Tensor(batchSize*inputSize, nClass):zero() - local eye = torch.eye(nClass) - output2:index(eye, 1, input:view(-1)) - output2:resize(batchSize, inputSize, nClass) - mytester:assertTensorEq(output, output2, 0.000001, "OneHot 2d forward batch err") - mytester:assert(output:dim() == 3) - - local gradInput = oh:backward(input, gradOutput) - mytester:assertTensorEq(gradInput, input:double():zero(), 0.000001, "OneHot 2d backward batch err") - - if pcall(function() require 'cunn' end) then - oh:cuda() - - -- test with long input - local output = oh:forward(input) - mytester:assert(torch.type(output) == 'torch.CudaTensor') - mytester:assertTensorEq(output:double(), output2, 0.000001, "OneHot 2d forward batch long-cuda err") - - -- test with cuda input - local input = input:cuda() - gradOutput = gradOutput:cuda() 
- - local output = oh:forward(input) - mytester:assert(torch.type(output) == 'torch.CudaTensor') - mytester:assertTensorEq(output:double(), output2, 0.000001, "OneHot 2d forward batch cuda err") - - local gradInput2 = oh:backward(input, gradOutput) - mytester:assertTensorEq(gradInput, gradInput2:double(), 0.000001, "OneHot 2d backward batch err") - - local benchmark = false - if benchmark then - local input = torch.FloatTensor(50, 50):random(1,65):cuda() - - local oh = nn.OneHot(65):cuda() - - oh:forward(input) - cutorch.synchronize() - local a = torch.Timer() - for i=1,10 do - oh:forward(input) - end - cutorch.synchronize() - local gputime = a:time().real - - oh:float() - input = input:float() - oh:forward(input) - a = torch.Timer() - for i=1,10 do - oh:forward(input) - end - local cputime = a:time().real - print("Onehot GPU vs CPU time", gputime, cputime) - end - end -end - -function nntest.ZeroGrad() - local input = torch.randn(3,4) - local zg = nn.ZeroGrad() - local output = zg:forward(input) - mytester:assertTensorEq(input, output, 0.00000001) - local gradInput = zg:backward(input, input) - local gradInput2 = gradInput:clone():zero() - mytester:assertTensorEq(gradInput, gradInput2, 0.0000001) -end - -function nntest.ZipTable() - -- input : { {a1,a2}, {b1,b2}, {c1,c2} } - -- output : { {a1,b1,c1}, {a2,b2,c2} } - local z = nn.ZipTable() - local input = { - {torch.randn(3,4), torch.randn(3,4)}, - {torch.randn(3,4), torch.randn(3,4)}, - {torch.randn(3,4), torch.randn(3,4)} - } - local output = z:forward(input) - mytester:assert(#output == 2, "ZipTable #output") - mytester:assert(#(output[1]) == 3, "ZipTable #output[1]") - mytester:assertTensorEq(input[1][1], output[1][1], 0.000001, "ZipTable input11") - mytester:assertTensorEq(input[1][2], output[2][1], 0.000001, "ZipTable input12") - mytester:assertTensorEq(input[3][2], output[2][3], 0.000001, "ZipTable input32") - local gradInput = z:backward(input, output) - mytester:assert(#gradInput == 3, "ZipTable #gradInput") - mytester:assert(#(gradInput[1]) == 2, "ZipTable #gradInput[1]") - mytester:assertTensorEq(input[1][1], gradInput[1][1], 0.000001, "ZipTable gradInput11") - mytester:assertTensorEq(input[1][2], gradInput[1][2], 0.000001, "ZipTable gradInput12") - mytester:assertTensorEq(input[3][2], gradInput[3][2], 0.000001, "ZipTable gradInput32") -end - -function nntest.ZipTableOneToMany() - -- input : { v, {a,b,c} } - -- output : { {v,a}, {v,b}, {v,c} } - local z = nn.ZipTableOneToMany() - local input = { torch.randn(3), { torch.randn(4), torch.rand(4), torch.rand(4) } } - local output = z:forward(input) - mytester:assert(#output == 3, "ZipTableOneToMany #output") - mytester:assert(#(output[1]) == 2, "ZipTableOneToMany #output[1]") - mytester:assert(#(output[2]) == 2, "ZipTableOneToMany #output[2]") - mytester:assert(#(output[3]) == 2, "ZipTableOneToMany #output[3]") - mytester:assertTensorEq(input[1], output[1][1], 0.000001, "ZipTableOneToMany input1 output11") - mytester:assertTensorEq(input[1], output[2][1], 0.000001, "ZipTableOneToMany input1 output21") - mytester:assertTensorEq(input[1], output[3][1], 0.000001, "ZipTableOneToMany input1 output31") - mytester:assertTensorEq(input[2][1], output[1][2], 0.000001, "ZipTableOneToMany input21") - mytester:assertTensorEq(input[2][2], output[2][2], 0.000001, "ZipTableOneToMany input22") - mytester:assertTensorEq(input[2][3], output[3][2], 0.000001, "ZipTableOneToMany input23") - local gradInput = z:backward(input, output) - mytester:assert(#gradInput == 2, "ZipTableOneToMany #gradInput") - 
mytester:assert(#(gradInput[2]) == 3, "ZipTableOneToMany #gradInput[2]") - mytester:assertTensorEq(input[2][1], gradInput[2][1], 0.000001, "ZipTableOneToMany gradInput21") - mytester:assertTensorEq(input[2][2], gradInput[2][2], 0.000001, "ZipTableOneToMany gradInput22") - mytester:assertTensorEq(input[2][3], gradInput[2][3], 0.000001, "ZipTableOneToMany gradInput23") - mytester:assertTensorEq(torch.mul(input[1], 3), gradInput[1], 0.000001, "ZipTableOneToMany gradInput1") -end - -function nntest.Collapse() - local c = nn.Collapse(3) - local input = torch.randn(8,3,4,5) - local output = c:forward(input) - mytester:assertTensorEq(input:view(8,-1), output, 0.000001, "Collapse:forward") - local gradInput = c:backward(input, output) - mytester:assertTensorEq(gradInput, input, 0.000001, "Collapse:backward") - mytester:assertTableEq(gradInput:size():totable(), input:size():totable(), 0.000001, "Collapse:backward size") - local input2 = input:transpose(1,4) - local output2 = c:forward(input2) - mytester:assertTensorEq(input2:contiguous():view(5,-1), output2, 0.000001, "Collapse:forward non-contiguous") - local gradInput2 = c:backward(input2, output2) - mytester:assertTensorEq(gradInput2, input2, 0.000001, "Collapse:backward non-contiguous") - mytester:assertTableEq(gradInput2:size():totable(), input2:size():totable(), 0.000001, "Collapse:backward size non-contiguous") -end - -function nntest.Convert() - -- batch mode - local c = nn.Convert('bchw', 'chwb') - local input = torch.randn(8,3,5,5) - local output = c:forward(input) - local output2 = input:transpose(1,4):transpose(1,3):transpose(1,2) - mytester:assertTensorEq(output, output2, 0.000001, "Convert fwd bchw->chwb") - local gradInput = c:backward(input, output) - mytester:assertTensorEq(gradInput, input, 0.000001, "Convert bwd bchw->chwb") - local c = nn.Convert('bchw', 'bf') - local output = c:forward(input) - local output2 = input:view(8,-1) - mytester:assertTensorEq(output, output2, 0.000001, "Convert fwd bchw->bf") - c:float() - local output = c:forward(input:float()) - mytester:assertTensorEq(output, output2:float(), 0.000001, "Convert:type()") - local output = c:forward(input) - mytester:assertTensorEq(output, output2:float(), 0.000001, "Convert:type() double->float") - -- non-batch mode - local c = nn.Convert('chw', 'hwc') - local input = torch.randn(3,5,5) - local output = c:forward(input) - local output2 = input:transpose(1,3):transpose(1,2) - mytester:assertTensorEq(output, output2, 0.000001, "Convert fwd chw->hwc non-batch") - local gradInput = c:backward(input, output) - mytester:assertTensorEq(gradInput, input, 0.000001, "Convert bwd chw->hwc non-batch") - local c = nn.Convert('chw', 'f') - local output = c:forward(input) - local output2 = input:view(-1) - mytester:assertTensorEq(output, output2, 0.000001, "Convert fwd chw->f non-batch") - c:float() - local output = c:forward(input:float()) - mytester:assertTensorEq(output, output2:float(), 0.000001, "Convert:type() non-batch") - local output = c:forward(input) - mytester:assertTensorEq(output, output2:float(), 0.000001, "Convert:type() double->float non-batch") -end - -function nntest.CAddTensorTable() - -- input : { v, {a,b,c} } - -- output : { v+a, v+b, v+c } - local z = nn.CAddTensorTable() - local input = { torch.randn(3), { torch.randn(3), torch.rand(3), torch.rand(3) } } - local output = z:forward(input) - mytester:assert(#output == 3, "CAddTensorTable #output") - mytester:assertTensorEq(input[1]+input[2][1], output[1], 0.00001, "CAddTensorTable input21 output1") -
mytester:assertTensorEq(input[1]+input[2][2], output[2], 0.00001, "CAddTensorTable input22 output2") - mytester:assertTensorEq(input[1]+input[2][3], output[3], 0.00001, "CAddTensorTable input23 output3") - local gradInput = z:backward(input, output) - mytester:assert(#gradInput == 2, "CAddTensorTable #gradInput") - mytester:assert(#(gradInput[2]) == 3, "CAddTensorTable #gradInput[2]") - mytester:assertTensorEq(output[1], gradInput[2][1], 0.000001, "CAddTensorTable gradInput21") - mytester:assertTensorEq(output[2], gradInput[2][2], 0.000001, "CAddTensorTable gradInput22") - mytester:assertTensorEq(output[3], gradInput[2][3], 0.000001, "CAddTensorTable gradInput23") - mytester:assertTensorEq(output[1]+output[2]+output[3], gradInput[1], 0.000001, "CAddTensorTable gradInput1") -end - --- Unit Test Kmeans layer -function nntest.Kmeans() - local k = 3 - local dim = 5 - local batchSize = 200 - local input = torch.Tensor(batchSize, dim) - for i=1, batchSize do - input[i]:fill(torch.random(1, k)) - end - - local verbose = false - - local attempts = 10 - local iter = 100 - local bestLoss = 100000000 - local bestKm = nil - local tempLoss = 0 - local learningRate = 1 - - local initTypes = {'random', 'kmeans++'} - local useCudas = {false} - if pcall(function() require 'cunn' end) then - useCudas[2] = true - end - for _, initType in pairs(initTypes) do - for _, useCuda in pairs(useCudas) do - - if useCuda then - input = input:cuda() - else - input = input:double() - end - - local timer = torch.Timer() - for j=1, attempts do - local km = nn.Kmeans(k, dim) - if useCuda then km:cuda() end - - if initType == 'kmeans++' then - km:initKmeansPlus(input) - else - km:initRandom(input) - end - - for i=1, iter do - km:zeroGradParameters() - - km:forward(input) - km:backward(input, gradOutput) -- gradOutput is undefined (nil) here; nn.Kmeans does not use it - - -- Gradient descent - km.weight:add(-learningRate, km.gradWeight) - tempLoss = km.loss - end - if verbose then print("Attempt Loss " .. j ..": " .. tempLoss) end - if tempLoss < bestLoss then - bestLoss = tempLoss - end - if (initType == 'kmeans++' and bestLoss < 0.00001) or (initType == 'random' and bestLoss < 500) then - break - end - end - if verbose then - print("InitType: " .. initType .. " useCuda: " .. tostring(useCuda)) - print("Best Loss: " .. bestLoss) - print("Total time: " ..
timer:time().real) - end - if initType == 'kmeans++' then - mytester:assert(bestLoss < 0.00001, "Kmeans++ error ("..(useCuda and 'cuda' or 'double')..")") - else - mytester:assert(bestLoss < 500, "Kmeans error ("..(useCuda and 'cuda' or 'double')..")") - end - end - end -end - -mytester:add(nntest) - -jac = nn.Jacobian -sjac = nn.SparseJacobian -function nn.test(tests, seed) - -- Limit number of threads since everything is small - local nThreads = torch.getnumthreads() - torch.setnumthreads(1) - -- randomize stuff - local seed = seed or (1e5 * torch.tic()) - print('Seed: ', seed) - math.randomseed(seed) - torch.manualSeed(seed) - mytester:run(tests) - torch.setnumthreads(nThreads) - return mytester -end - -function nn.testTHNN(tests, seed) - require 'test.LinearTHNN' - nn.Linear = nn.LinearTHNN - return nn.test(tests,seed) -end diff --git a/contrib/lua-torch/nn/utils.lua b/contrib/lua-torch/nn/utils.lua deleted file mode 100644 index 17b52afb3e..0000000000 --- a/contrib/lua-torch/nn/utils.lua +++ /dev/null @@ -1,223 +0,0 @@ -nn.utils = {} - --- oops; someone forgot to add torch.Storage.type --- TODO replace with torch.Storage.type when implemented -local function torch_Storage_type(self, type) - local current = torch.typename(self) - if not type then return current end - if type ~= current then - local new = torch.getmetatable(type).new() - if self:size() > 0 then - new:resize(self:size()):copy(self) - end - return new - else - return self - end -end - --- tensorCache maintains a list of all tensors and storages that have been --- converted (recursively) by calls to recursiveType() and type(). --- It caches conversions in order to preserve sharing semantics --- i.e. if two tensors share a common storage, then type conversion --- should preserve that. --- --- You can preserve sharing semantics across multiple networks by --- passing tensorCache between the calls to type, e.g. --- --- > tensorCache = {} --- > net1:type('torch.CudaTensor', tensorCache) --- > net2:type('torch.CudaTensor', tensorCache) --- > nn.utils.recursiveType(anotherTensor, 'torch.CudaTensor', tensorCache) --- --- Implementation note: to make Lua table lookup behave correctly, --- tensor keys are stored as actual tensor objects, while storage --- keys are stored as the pointers themselves (as numbers). 
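--
-- A minimal sketch of the guarantee this cache provides (illustrative only,
-- assuming torch and nn are loaded; not part of the original module):
--
--   local cache = {}
--   local a = torch.DoubleTensor(4):fill(1)
--   local b = a:narrow(1, 1, 2)                       -- b views a's storage
--   local fa = nn.utils.recursiveType(a, 'torch.FloatTensor', cache)
--   local fb = nn.utils.recursiveType(b, 'torch.FloatTensor', cache)
--   fa[1] = 7
--   assert(fb[1] == 7)                                -- sharing is preserved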
-function nn.utils.recursiveType(param, type, tensorCache) - tensorCache = tensorCache or {} - - if torch.type(param) == 'table' then - for k, v in pairs(param) do - param[k] = nn.utils.recursiveType(v, type, tensorCache) - end - elseif torch.isTypeOf(param, 'nn.Module') or - torch.isTypeOf(param, 'nn.Criterion') then - param:type(type, tensorCache) - elseif torch.isTensor(param) then - if torch.typename(param) ~= type then - local newparam - if tensorCache[param] then - newparam = tensorCache[param] - else - newparam = torch.Tensor():type(type) - local storageType = type:gsub('Tensor','Storage') - if param:storage() then - local storage_key = torch.pointer(param:storage()) - if not tensorCache[storage_key] then - tensorCache[storage_key] = torch_Storage_type( - param:storage(), storageType) - end - assert(torch.type(tensorCache[storage_key]) == storageType) - newparam:set( - tensorCache[storage_key], - param:storageOffset(), - param:size(), - param:stride() - ) - end - tensorCache[param] = newparam - end - assert(torch.type(newparam) == type) - param = newparam - end - end - return param -end - -function nn.utils.recursiveResizeAs(t1,t2) - if torch.type(t2) == 'table' then - t1 = (torch.type(t1) == 'table') and t1 or {t1} - for key,_ in pairs(t2) do - t1[key], t2[key] = nn.utils.recursiveResizeAs(t1[key], t2[key]) - end - for key,_ in pairs(t1) do - if not t2[key] then - t1[key] = nil - end - end - elseif torch.isTensor(t2) then - t1 = torch.isTensor(t1) and t1 or t2.new() - t1:resize(t2:size()) - else - error("expecting nested tensors or tables. Got ".. - torch.type(t1).." and "..torch.type(t2).." instead") - end - return t1, t2 -end - -function nn.utils.recursiveFill(t2, val) - if torch.type(t2) == 'table' then - for key,_ in pairs(t2) do - t2[key] = nn.utils.recursiveFill(t2[key], val) - end - elseif torch.isTensor(t2) then - t2:fill(val) - else - error("expecting tensor or table thereof. Got " - ..torch.type(t2).." instead") - end - return t2 -end - -function nn.utils.recursiveAdd(t1, val, t2) - if not t2 then - assert(val, "expecting at least two arguments") - t2 = val - val = 1 - end - val = val or 1 - if torch.type(t2) == 'table' then - t1 = (torch.type(t1) == 'table') and t1 or {t1} - for key,_ in pairs(t2) do - t1[key], t2[key] = nn.utils.recursiveAdd(t1[key], val, t2[key]) - end - elseif torch.isTensor(t1) and torch.isTensor(t2) then - t1:add(val, t2) - else - error("expecting nested tensors or tables. Got ".. - torch.type(t1).." and "..torch.type(t2).." instead") - end - return t1, t2 -end - -function nn.utils.recursiveCopy(t1,t2,async) - if torch.type(t2) == 'table' then - t1 = (torch.type(t1) == 'table') and t1 or {t1} - for key,_ in pairs(t2) do - t1[key], t2[key] = nn.utils.recursiveCopy(t1[key], t2[key], async) - end - elseif torch.isTensor(t2) then - t1 = torch.isTensor(t1) and t1 or t2.new() - t1:resize(t2:size()) - if async then - t1:copyAsync(t2) - else - t1:copy(t2) - end - else - error("expecting nested tensors or tables. Got ".. - torch.type(t1).." and "..torch.type(t2).." instead") - end - return t1, t2 -end - -function nn.utils.addSingletonDimension(...) - local view, t, dim - if select('#',...) < 3 then - t, dim = select(1,...) - else - view, t, dim = select(1,...) - assert(torch.isTensor(view), - "output tensor expected, got " .. type(view)) - end - - assert(torch.isTensor(t), "input tensor expected") - dim = dim or 1 - assert(dim > 0 and dim <= (t:dim() + 1), "invalid dimension: " .. dim - .. '. Tensor is of ' .. t:dim() .. 
' dimensions.') - - view = view or t.new() - local size = torch.LongStorage(t:dim() + 1) - local stride = torch.LongStorage(t:dim() + 1) - - for d = 1, dim - 1 do - size[d] = t:size(d) - stride[d] = t:stride(d) - end - size[dim] = 1 - stride[dim] = 1 - for d = dim + 1, t:dim() + 1 do - size[d] = t:size(d - 1) - stride[d] = t:stride(d - 1) - end - - view:set(t:storage(), t:storageOffset(), size, stride) - return view -end - -function nn.utils.contiguousView(output, input, ...) - output = output or input.new() - if input:isContiguous() then - output:view(input, ...) - else - output:resize(input:size()) - output:copy(input) - output:view(output, ...) - end - return output -end - --- go over specified fields and clear them. accepts --- nn.utils.clearState(self, {'_buffer', '_buffer2'}) and --- nn.utils.clearState(self, '_buffer', '_buffer2') -function nn.utils.clear(self, ...) - local arg = {...} - if #arg > 0 and type(arg[1]) == 'table' then - arg = arg[1] - end - local function clear(f) - if self[f] then - if torch.isTensor(self[f]) then - self[f]:set() - elseif type(self[f]) == 'table' then - self[f] = {} - else - self[f] = nil - end - end - end - for i,v in ipairs(arg) do clear(v) end - return self -end - -table.unpack = table.unpack or unpack diff --git a/contrib/lua-torch/optim/CMakeLists.txt b/contrib/lua-torch/optim/CMakeLists.txt deleted file mode 100644 index b1c13e7011..0000000000 --- a/contrib/lua-torch/optim/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ - -CMAKE_MINIMUM_REQUIRED(VERSION 2.6 FATAL_ERROR) -SET(src) -FILE(GLOB luasrc *.lua) -ADD_TORCH_PACKAGE(optim "${src}" "${luasrc}") diff --git a/contrib/lua-torch/optim/COPYRIGHT.txt b/contrib/lua-torch/optim/COPYRIGHT.txt deleted file mode 100644 index 2e4118c0f8..0000000000 --- a/contrib/lua-torch/optim/COPYRIGHT.txt +++ /dev/null @@ -1,35 +0,0 @@ -Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) -Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) -Copyright (c) 2011-2013 NYU (Clement Farabet) -Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) -Copyright (c) 2006 Idiap Research Institute (Samy Bengio) -Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the names of NEC Laboratories American and IDIAP Research - Institute nor the names of its contributors may be used to endorse or - promote products derived from this software without specific prior - written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. diff --git a/contrib/lua-torch/optim/ConfusionMatrix.lua b/contrib/lua-torch/optim/ConfusionMatrix.lua deleted file mode 100644 index ec5302c24d..0000000000 --- a/contrib/lua-torch/optim/ConfusionMatrix.lua +++ /dev/null @@ -1,361 +0,0 @@ ---[[ A Confusion Matrix class - -Example: - - conf = optim.ConfusionMatrix( {'cat','dog','person'} ) -- new matrix - conf:zero() -- reset matrix - for i = 1,N do - conf:add( neuralnet:forward(sample), label ) -- accumulate errors - end - print(conf) -- print matrix - image.display(conf:render()) -- render matrix -]] -local ConfusionMatrix = torch.class('optim.ConfusionMatrix') - -function ConfusionMatrix:__init(nclasses, classes) - if type(nclasses) == 'table' then - classes = nclasses - nclasses = #classes - end - self.mat = torch.LongTensor(nclasses,nclasses):zero() - self.valids = torch.FloatTensor(nclasses):zero() - self.unionvalids = torch.FloatTensor(nclasses):zero() - self.nclasses = nclasses - self.totalValid = 0 - self.averageValid = 0 - self.classes = classes or {} - -- buffers - self._mat_flat = self.mat:view(-1) - self._target = torch.FloatTensor() - self._prediction = torch.FloatTensor() - self._max = torch.FloatTensor() - self._pred_idx = torch.LongTensor() - self._targ_idx = torch.LongTensor() -end - --- takes scalar prediction and target as input -function ConfusionMatrix:_add(p, t) - assert(p and type(p) == 'number') - assert(t and type(t) == 'number') - -- non-positive values are considered missing - -- and therefore ignored - if t > 0 then - self.mat[t][p] = self.mat[t][p] + 1 - end -end - -function ConfusionMatrix:add(prediction, target) - if type(prediction) == 'number' then - -- comparing numbers - self:_add(prediction, target) - else - self._prediction:resize(prediction:size()):copy(prediction) - assert(prediction:dim() == 1) - if type(target) == 'number' then - -- prediction is a vector, then target assumed to be an index - self._max:max(self._pred_idx, self._prediction, 1) - self:_add(self._pred_idx[1], target) - else - -- both prediction and target are vectors - assert(target:dim() == 1) - self._target:resize(target:size()):copy(target) - self._max:max(self._targ_idx, self._target, 1) - self._max:max(self._pred_idx, self._prediction, 1) - self:_add(self._pred_idx[1], self._targ_idx[1]) - end - end -end - -function ConfusionMatrix:batchAdd(predictions, targets) - local preds, targs, __ - self._prediction:resize(predictions:size()):copy(predictions) - if predictions:dim() == 1 then - -- predictions is a vector of classes - preds = self._prediction - elseif predictions:dim() == 2 then - -- prediction is a matrix of class likelihoods - if predictions:size(2) == 1 then - -- or prediction just needs flattening - preds = self._prediction:select(2,1) - else - self._max:max(self._pred_idx, self._prediction, 2) - preds = self._pred_idx:select(2,1) - end - else - error("predictions has invalid number of dimensions") - end - - self._target:resize(targets:size()):copy(targets) - if targets:dim() == 1 then - -- targets is a vector 
of classes - targs = self._target - elseif targets:dim() == 2 then - -- targets is a matrix of one-hot rows - if targets:size(2) == 1 then - -- or targets just needs flattening - targs = self._target:select(2,1) - else - self._max:max(self._targ_idx, self._target, 2) - targs = self._targ_idx:select(2,1) - end - else - error("targets has invalid number of dimensions") - end - - -- non-positive values are considered missing and therefore ignored - local mask = targs:ge(1) - targs = targs[mask] - preds = preds[mask] - - self._mat_flat = self._mat_flat or self.mat:view(-1) -- for backward compatibility - - preds = preds:typeAs(targs) - - assert(self.mat:isContiguous() and self.mat:stride(2) == 1) - local indices = ((targs - 1) * self.mat:stride(1) + preds):typeAs(self.mat) - local ones = torch.ones(1):typeAs(self.mat):expand(indices:size(1)) - self._mat_flat:indexAdd(1, indices, ones) -end - -function ConfusionMatrix:zero() - self.mat:zero() - self.valids:zero() - self.unionvalids:zero() - self.totalValid = 0 - self.averageValid = 0 -end - -local function isNaN(number) - return number ~= number -end - -function ConfusionMatrix:updateValids() - local total = 0 - for t = 1,self.nclasses do - self.valids[t] = self.mat[t][t] / self.mat:select(1,t):sum() - self.unionvalids[t] = self.mat[t][t] / (self.mat:select(1,t):sum()+self.mat:select(2,t):sum()-self.mat[t][t]) - total = total + self.mat[t][t] - end - self.totalValid = total / self.mat:sum() - self.averageValid = 0 - self.averageUnionValid = 0 - local nvalids = 0 - local nunionvalids = 0 - for t = 1,self.nclasses do - if not isNaN(self.valids[t]) then - self.averageValid = self.averageValid + self.valids[t] - nvalids = nvalids + 1 - end - if not isNaN(self.valids[t]) and not isNaN(self.unionvalids[t]) then - self.averageUnionValid = self.averageUnionValid + self.unionvalids[t] - nunionvalids = nunionvalids + 1 - end - end - self.averageValid = self.averageValid / nvalids - self.averageUnionValid = self.averageUnionValid / nunionvalids -end - --- Calculating FAR/FRR associated with the confusion matrix - -function ConfusionMatrix:farFrr() - local cmat = self.mat - local noOfClasses = cmat:size()[1] - self._frrs = self._frrs or torch.zeros(noOfClasses) - self._frrs:zero() - self._classFrrs = self._classFrrs or torch.zeros(noOfClasses) - self._classFrrs:zero() - self._classFrrs:add(-1) - self._fars = self._fars or torch.zeros(noOfClasses) - self._fars:zero() - self._classFars = self._classFars or torch.zeros(noOfClasses) - self._classFars:zero() - self._classFars:add(-1) - local classSamplesCount = cmat:sum(2) - local indx = 1 - for i=1,noOfClasses do - if classSamplesCount[i][1] ~= 0 then - self._frrs[indx] = 1 - cmat[i][i]/classSamplesCount[i][1] - self._classFrrs[i] = self._frrs[indx] - -- Calculating FARs - local farNumerator = 0 - local farDenominator = 0 - for j=1, noOfClasses do - if i ~= j then - if classSamplesCount[j][1] ~= 0 then - farNumerator = farNumerator + cmat[j][i]/classSamplesCount[j][1] - farDenominator = farDenominator + 1 - end - end - end - self._fars[indx] = farNumerator/farDenominator - self._classFars[i] = self._fars[indx] - indx = indx + 1 - end - end - indx = indx - 1 - local returnFrrs = self._frrs[{{1, indx}}] - local returnFars = self._fars[{{1, indx}}] - return self._classFrrs, self._classFars, returnFrrs, returnFars -end - -local function log10(n) - if math.log10 then - return math.log10(n) - else - return math.log(n) / math.log(10) - end -end - -function ConfusionMatrix:__tostring__() - self:updateValids() - local 
str = {'ConfusionMatrix:\n'} - local nclasses = self.nclasses - table.insert(str, '[') - local maxCnt = self.mat:max() - local nDigits = math.max(8, 1 + math.ceil(log10(maxCnt))) - for t = 1,nclasses do - local pclass = self.valids[t] * 100 - pclass = string.format('%2.3f', pclass) - if t == 1 then - table.insert(str, '[') - else - table.insert(str, ' [') - end - for p = 1,nclasses do - table.insert(str, string.format('%' .. nDigits .. 'd', self.mat[t][p])) - end - if self.classes and self.classes[1] then - if t == nclasses then - table.insert(str, ']] ' .. pclass .. '% \t[class: ' .. (self.classes[t] or '') .. ']\n') - else - table.insert(str, '] ' .. pclass .. '% \t[class: ' .. (self.classes[t] or '') .. ']\n') - end - else - if t == nclasses then - table.insert(str, ']] ' .. pclass .. '% \n') - else - table.insert(str, '] ' .. pclass .. '% \n') - end - end - end - table.insert(str, ' + average row correct: ' .. (self.averageValid*100) .. '% \n') - table.insert(str, ' + average rowUcol correct (VOC measure): ' .. (self.averageUnionValid*100) .. '% \n') - table.insert(str, ' + global correct: ' .. (self.totalValid*100) .. '%') - return table.concat(str) -end - -function ConfusionMatrix:render(sortmode, display, block, legendwidth) - -- args - local confusion = self.mat:double() - local classes = self.classes - local sortmode = sortmode or 'score' -- 'score' or 'occurrence' - local block = block or 25 - local legendwidth = legendwidth or 200 - local display = display or false - - -- legends - local legend = { - ['score'] = 'Confusion matrix [sorted by scores, global accuracy = %0.3f%%, per-class accuracy = %0.3f%%]', - ['occurrence'] = 'Confusion matrix [sorted by occurrences, accuracy = %0.3f%%, per-class accuracy = %0.3f%%]' - } - - -- parse matrix / normalize / count scores - local diag = torch.FloatTensor(#classes) - local freqs = torch.FloatTensor(#classes) - local unconf = confusion - local confusion = confusion:clone() - local corrects = 0 - local total = 0 - for target = 1,#classes do - freqs[target] = confusion[target]:sum() - corrects = corrects + confusion[target][target] - total = total + freqs[target] - confusion[target]:div( math.max(confusion[target]:sum(),1) ) - diag[target] = confusion[target][target] - end - - -- accuracies - local accuracy = corrects / total * 100 - local perclass = 0 - local total = 0 - for target = 1,#classes do - if confusion[target]:sum() > 0 then - perclass = perclass + diag[target] - total = total + 1 - end - end - perclass = perclass / total * 100 - freqs:div(unconf:sum()) - - -- sort matrix - if sortmode == 'score' then - _,order = torch.sort(diag,1,true) - elseif sortmode == 'occurrence' then - _,order = torch.sort(freqs,1,true) - else - error('sort mode must be one of: score | occurrence') - end - - -- render matrix - local render = torch.zeros(#classes*block, #classes*block) - for target = 1,#classes do - for prediction = 1,#classes do - render[{ { (target-1)*block+1,target*block }, { (prediction-1)*block+1,prediction*block } }] = confusion[order[target]][order[prediction]] - end - end - - -- add grid - for target = 1,#classes do - render[{ {target*block},{} }] = 0.1 - render[{ {},{target*block} }] = 0.1 - end - - -- create rendering - require 'image' - require 'qtwidget' - require 'qttorch' - local win1 = qtwidget.newimage( (#render)[2]+legendwidth, (#render)[1] ) - image.display{image=render, win=win1} - - -- add legend - for i in ipairs(classes) do - -- background cell - win1:setcolor{r=0,g=0,b=0} - 
win1:rectangle((#render)[2],(i-1)*block,legendwidth,block) - win1:fill() - - -- % - win1:setfont(qt.QFont{serif=false, size=fontsize}) - local gscale = freqs[order[i]]/freqs:max()*0.9+0.1 --3/4 - win1:setcolor{r=gscale*0.5+0.2,g=gscale*0.5+0.2,b=gscale*0.8+0.2} - win1:moveto((#render)[2]+10,i*block-block/3) - win1:show(string.format('[%2.2f%% labels]',math.floor(freqs[order[i]]*10000+0.5)/100)) - - -- legend - win1:setfont(qt.QFont{serif=false, size=fontsize}) - local gscale = diag[order[i]]*0.8+0.2 - win1:setcolor{r=gscale,g=gscale,b=gscale} - win1:moveto(120+(#render)[2]+10,i*block-block/3) - win1:show(classes[order[i]]) - - for j in ipairs(classes) do - -- scores - local score = confusion[order[j]][order[i]] - local gscale = (1-score)*(score*0.8+0.2) - win1:setcolor{r=gscale,g=gscale,b=gscale} - win1:moveto((i-1)*block+block/5,(j-1)*block+block*2/3) - win1:show(string.format('%02.0f',math.floor(score*100+0.5))) - end - end - - -- generate tensor - local t = win1:image():toTensor() - - -- display - if display then - image.display{image=t, legend=string.format(legend[sortmode],accuracy,perclass)} - end - - -- return rendering - return t -end diff --git a/contrib/lua-torch/optim/Logger.lua b/contrib/lua-torch/optim/Logger.lua deleted file mode 100644 index 3c1da54d31..0000000000 --- a/contrib/lua-torch/optim/Logger.lua +++ /dev/null @@ -1,189 +0,0 @@ ---[[ Logger: a simple class to log symbols during training, - and automate plot generation - -Example: - logger = optim.Logger('somefile.log') -- file to save stuff - - for i = 1,N do -- log some symbols during - train_error = ... -- training/testing - test_error = ... - logger:add{['training error'] = train_error, - ['test error'] = test_error} - end - - logger:style{['training error'] = '-', -- define styles for plots - ['test error'] = '-'} - logger:plot() -- and plot - ----- OR --- - - logger = optim.Logger('somefile.log') -- file to save stuff - logger:setNames{'training error', 'test error'} - - for i = 1,N do -- log some symbols during - train_error = ... -- training/testing - test_error = ... - logger:add{train_error, test_error} - end - - logger:style{'-', '-'} -- define styles for plots - logger:plot() -- and plot - ------------ - - logger:setlogscale(true) -- enable logscale on Y-axis - logger:plot() -- and plot -]] -local Logger = torch.class('optim.Logger') - -function Logger:__init(filename, timestamp) - if filename then - self.name = filename - os.execute('mkdir ' .. (sys.uname() ~= 'windows' and '-p ' or '') .. ' "' .. paths.dirname(filename) .. '"') - if timestamp then - -- append timestamp to create unique log file - filename = filename .. '-'..os.date("%Y_%m_%d_%X") - end - self.file = io.open(filename,'w') - self.epsfile = self.name .. '.eps' - else - self.file = io.stdout - self.name = 'stdout' - print(' warning: no path provided, logging to std out') - end - self.empty = true - self.symbols = {} - self.styles = {} - self.names = {} - self.idx = {} - self.figure = nil - self.showPlot = true - self.plotRawCmd = nil - self.defaultStyle = '+' - self.logscale = false -end - -function Logger:setNames(names) - self.names = names - self.empty = false - self.nsymbols = #names - for k,key in pairs(names) do - self.file:write(key .. '\t') - self.symbols[k] = {} - self.styles[k] = {self.defaultStyle} - self.idx[key] = k - end - self.file:write('\n') - self.file:flush() - return self -end - -function Logger:add(symbols) - -- (1) first time ? 
print symbols' names on first row - if self.empty then - self.empty = false - self.nsymbols = #symbols - for k,val in pairs(symbols) do - self.file:write(k .. '\t') - self.symbols[k] = {} - self.styles[k] = {self.defaultStyle} - self.names[k] = k - end - self.idx = self.names - self.file:write('\n') - end - -- (2) print all symbols on one row - for k,val in pairs(symbols) do - if type(val) == 'number' then - self.file:write(string.format('%11.4e',val) .. '\t') - elseif type(val) == 'string' then - self.file:write(val .. '\t') - else - xlua.error('can only log numbers and strings', 'Logger') - end - end - self.file:write('\n') - self.file:flush() - -- (3) save symbols in internal table - for k,val in pairs(symbols) do - table.insert(self.symbols[k], val) - end -end - -function Logger:style(symbols) - for name,style in pairs(symbols) do - if type(style) == 'string' then - self.styles[name] = {style} - elseif type(style) == 'table' then - self.styles[name] = style - else - xlua.error('style should be a string or a table of strings','Logger') - end - end - return self -end - -function Logger:setlogscale(state) - self.logscale = state -end - -function Logger:display(state) - self.showPlot = state -end - -function Logger:plot(...) - if not xlua.require('gnuplot') then - if not self.warned then - print(' warning: cannot plot with this version of Torch') - self.warned = true - end - return - end - local plotit = false - local plots = {} - local plotsymbol = - function(name,list) - if #list > 1 then - local nelts = #list - local plot_y = torch.Tensor(nelts) - for i = 1,nelts do - plot_y[i] = list[i] - end - for _,style in ipairs(self.styles[name]) do - table.insert(plots, {self.names[name], plot_y, style}) - end - plotit = true - end - end - local args = {...} - if not args[1] then -- plot all symbols - for name,list in pairs(self.symbols) do - plotsymbol(name,list) - end - else -- plot given symbols - for _,name in ipairs(args) do - plotsymbol(self.idx[name], self.symbols[self.idx[name]]) - end - end - if plotit then - if self.showPlot then - self.figure = gnuplot.figure(self.figure) - if self.logscale then gnuplot.logscale('on') end - gnuplot.plot(plots) - if self.plotRawCmd then gnuplot.raw(self.plotRawCmd) end - gnuplot.grid('on') - gnuplot.title('') - end - if self.epsfile then - os.execute('rm -f "' .. self.epsfile .. 
'"') - local epsfig = gnuplot.epsfigure(self.epsfile) - if self.logscale then gnuplot.logscale('on') end - gnuplot.plot(plots) - if self.plotRawCmd then gnuplot.raw(self.plotRawCmd) end - gnuplot.grid('on') - gnuplot.title('') - gnuplot.plotflush() - gnuplot.close(epsfig) - end - end -end diff --git a/contrib/lua-torch/optim/adadelta.lua b/contrib/lua-torch/optim/adadelta.lua deleted file mode 100644 index 7cc058d290..0000000000 --- a/contrib/lua-torch/optim/adadelta.lua +++ /dev/null @@ -1,55 +0,0 @@ ---[[ ADADELTA implementation for SGD http://arxiv.org/abs/1212.5701 - -ARGS: -- `opfunc` : a function that takes a single input (X), the point of - evaluation, and returns f(X) and df/dX -- `x` : the initial point -- `config` : a table of hyper-parameters -- `config.rho` : interpolation parameter -- `config.eps` : for numerical stability -- `config.weightDecay` : weight decay -- `state` : a table describing the state of the optimizer; after each - call the state is modified -- `state.paramVariance` : vector of temporal variances of parameters -- `state.accDelta` : vector of accummulated delta of gradients -RETURN: -- `x` : the new x vector -- `f(x)` : the function, evaluated before the update -]] -function optim.adadelta(opfunc, x, config, state) - -- (0) get/update state - if config == nil and state == nil then - print('no state table, ADADELTA initializing') - end - local config = config or {} - local state = state or config - local rho = config.rho or 0.9 - local eps = config.eps or 1e-6 - local wd = config.weightDecay or 0 - state.evalCounter = state.evalCounter or 0 - -- (1) evaluate f(x) and df/dx - local fx,dfdx = opfunc(x) - - -- (2) weight decay - if wd ~= 0 then - dfdx:add(wd, x) - end - - -- (3) parameter update - if not state.paramVariance then - state.paramVariance = torch.Tensor():typeAs(x):resizeAs(dfdx):zero() - state.paramStd = torch.Tensor():typeAs(x):resizeAs(dfdx):zero() - state.delta = torch.Tensor():typeAs(x):resizeAs(dfdx):zero() - state.accDelta = torch.Tensor():typeAs(x):resizeAs(dfdx):zero() - end - state.paramVariance:mul(rho):addcmul(1-rho,dfdx,dfdx) - state.paramStd:resizeAs(state.paramVariance):copy(state.paramVariance):add(eps):sqrt() - state.delta:resizeAs(state.paramVariance):copy(state.accDelta):add(eps):sqrt():cdiv(state.paramStd):cmul(dfdx) - x:add(-1, state.delta) - state.accDelta:mul(rho):addcmul(1-rho, state.delta, state.delta) - -- (4) update evaluation counter - state.evalCounter = state.evalCounter + 1 - - -- return x*, f(x) before optimization - return x,{fx} -end diff --git a/contrib/lua-torch/optim/adagrad.lua b/contrib/lua-torch/optim/adagrad.lua deleted file mode 100644 index 6860c4317b..0000000000 --- a/contrib/lua-torch/optim/adagrad.lua +++ /dev/null @@ -1,55 +0,0 @@ ---[[ ADAGRAD implementation for SGD - -ARGS: -- `opfunc` : a function that takes a single input (X), the point of - evaluation, and returns f(X) and df/dX -- `x` : the initial point -- `state` : a table describing the state of the optimizer; after each - call the state is modified -- `state.learningRate` : learning rate -- `state.paramVariance` : vector of temporal variances of parameters -- `state.weightDecay` : scalar that controls weight decay -RETURN: -- `x` : the new x vector -- `f(x)` : the function, evaluated before the update - -]] -function optim.adagrad(opfunc, x, config, state) - -- (0) get/update state - if config == nil and state == nil then - print('no state table, ADAGRAD initializing') - end - local config = config or {} - local state = state or config - local lr 
diff --git a/contrib/lua-torch/optim/adagrad.lua b/contrib/lua-torch/optim/adagrad.lua deleted file mode 100644 index 6860c4317b..0000000000 --- a/contrib/lua-torch/optim/adagrad.lua +++ /dev/null @@ -1,55 +0,0 @@ ---[[ ADAGRAD implementation for SGD -ARGS: -- `opfunc` : a function that takes a single input (X), the point of - evaluation, and returns f(X) and df/dX -- `x` : the initial point -- `state` : a table describing the state of the optimizer; after each - call the state is modified -- `state.learningRate` : learning rate -- `state.paramVariance` : vector of temporal variances of parameters -- `state.weightDecay` : scalar that controls weight decay -RETURN: -- `x` : the new x vector -- `f(x)` : the function, evaluated before the update - -]] -function optim.adagrad(opfunc, x, config, state) - -- (0) get/update state - if config == nil and state == nil then - print('no state table, ADAGRAD initializing') - end - local config = config or {} - local state = state or config - local lr = config.learningRate or 1e-3 - local lrd = config.learningRateDecay or 0 - local wd = config.weightDecay or 0 - state.evalCounter = state.evalCounter or 0 - local nevals = state.evalCounter - - -- (1) evaluate f(x) and df/dx - local fx,dfdx = opfunc(x) - - -- (2) weight decay with a single parameter - if wd ~= 0 then - dfdx:add(wd, x) - end - - -- (3) learning rate decay (annealing) - local clr = lr / (1 + nevals*lrd) - - -- (4) parameter update with single or individual learning rates - if not state.paramVariance then - state.paramVariance = torch.Tensor():typeAs(x):resizeAs(dfdx):zero() - state.paramStd = torch.Tensor():typeAs(x):resizeAs(dfdx) - end - state.paramVariance:addcmul(1,dfdx,dfdx) - state.paramStd:resizeAs(state.paramVariance):copy(state.paramVariance):sqrt() - x:addcdiv(-clr, dfdx,state.paramStd:add(1e-10)) - - -- (5) update evaluation counter - state.evalCounter = state.evalCounter + 1 - - -- return x*, f(x) before optimization - return x,{fx} -end diff --git a/contrib/lua-torch/optim/adam.lua b/contrib/lua-torch/optim/adam.lua deleted file mode 100644 index 2e127e96ad..0000000000 --- a/contrib/lua-torch/optim/adam.lua +++ /dev/null @@ -1,72 +0,0 @@ ---[[ An implementation of Adam https://arxiv.org/abs/1412.6980 - -ARGS: - -- `opfunc` : a function that takes a single input (X), the point - of evaluation, and returns f(X) and df/dX -- `x` : the initial point -- `config` : a table with configuration parameters for the optimizer -- `config.learningRate` : learning rate -- `config.learningRateDecay` : learning rate decay -- `config.beta1` : first moment coefficient -- `config.beta2` : second moment coefficient -- `config.epsilon` : for numerical stability -- `config.weightDecay` : weight decay -- `state` : a table describing the state of the optimizer; after each - call the state is modified - -RETURN: -- `x` : the new x vector -- `f(x)` : the function, evaluated before the update - -]] - -function optim.adam(opfunc, x, config, state) - -- (0) get/update state - local config = config or {} - local state = state or config - local lr = config.learningRate or 0.001 - local lrd = config.learningRateDecay or 0 - - local beta1 = config.beta1 or 0.9 - local beta2 = config.beta2 or 0.999 - local epsilon = config.epsilon or 1e-8 - local wd = config.weightDecay or 0 - - -- (1) evaluate f(x) and df/dx - local fx, dfdx = opfunc(x) - - -- (2) weight decay - if wd ~= 0 then - dfdx:add(wd, x) - end - - -- Initialization - state.t = state.t or 0 - -- Exponential moving average of gradient values - state.m = state.m or x.new(dfdx:size()):zero() - -- Exponential moving average of squared gradient values - state.v = state.v or x.new(dfdx:size()):zero() - -- A tmp tensor to hold the sqrt(v) + epsilon - state.denom = state.denom or x.new(dfdx:size()):zero() - - -- (3) learning rate decay (annealing) - local clr = lr / (1 + state.t*lrd) - - state.t = state.t + 1 - - -- Decay the first and second moment running average coefficient - state.m:mul(beta1):add(1-beta1, dfdx) - state.v:mul(beta2):addcmul(1-beta2, dfdx, dfdx) - - state.denom:copy(state.v):sqrt():add(epsilon) - - local biasCorrection1 = 1 - beta1^state.t - local biasCorrection2 = 1 - beta2^state.t - local stepSize = clr * math.sqrt(biasCorrection2)/biasCorrection1 - -- (4) update x - x:addcdiv(-stepSize, state.m, state.denom) - - -- return x*, f(x) before optimization - return x, {fx} -end diff --git a/contrib/lua-torch/optim/adamax.lua b/contrib/lua-torch/optim/adamax.lua deleted file mode 100644 index 2b6487720a..0000000000 ---
a/contrib/lua-torch/optim/adamax.lua +++ /dev/null @@ -1,66 +0,0 @@ ---[[ An implementation of AdaMax http://arxiv.org/pdf/1412.6980.pdf - -ARGS: - -- `opfunc` : a function that takes a single input (X), the point - of evaluation, and returns f(X) and df/dX -- `x` : the initial point -- `config` : a table with configuration parameters for the optimizer -- `config.learningRate` : learning rate -- `config.beta1` : first moment coefficient -- `config.beta2` : second moment coefficient -- `config.epsilon` : for numerical stability -- `state` : a table describing the state of the optimizer; - after each call the state is modified. - -RETURN: -- `x` : the new x vector -- `f(x)` : the function, evaluated before the update - -]] - -function optim.adamax(opfunc, x, config, state) - -- (0) get/update state - local config = config or {} - local state = state or config - local lr = config.learningRate or 0.002 - - local beta1 = config.beta1 or 0.9 - local beta2 = config.beta2 or 0.999 - local epsilon = config.epsilon or 1e-38 - local wd = config.weightDecay or 0 - - -- (1) evaluate f(x) and df/dx - local fx, dfdx = opfunc(x) - - -- (2) weight decay - if wd ~= 0 then - dfdx:add(wd, x) - end - - -- Initialization - state.t = state.t or 0 - -- Exponential moving average of gradient values - state.m = state.m or x.new(dfdx:size()):zero() - -- Exponential moving average of the infinity norm - state.u = state.u or x.new(dfdx:size()):zero() - -- A tmp tensor to hold the input to max() - state.max = state.max or x.new(2, unpack(dfdx:size():totable())):zero() - - state.t = state.t + 1 - - -- Update biased first moment estimate. - state.m:mul(beta1):add(1-beta1, dfdx) - -- Update the exponentially weighted infinity norm. - state.max[1]:copy(state.u):mul(beta2) - state.max[2]:copy(dfdx):abs():add(epsilon) - state.u:max(state.max, 1) - - local biasCorrection1 = 1 - beta1^state.t - local stepSize = lr/biasCorrection1 - -- (3) update x - x:addcdiv(-stepSize, state.m, state.u) - - -- return x*, f(x) before optimization - return x, {fx} -end diff --git a/contrib/lua-torch/optim/asgd.lua b/contrib/lua-torch/optim/asgd.lua deleted file mode 100644 index cc1c459f3d..0000000000 --- a/contrib/lua-torch/optim/asgd.lua +++ /dev/null @@ -1,73 +0,0 @@ ---[[ An implementation of ASGD - -ASGD: - - x := (1 - lambda eta_t) x - eta_t df/dx(z,x) - a := a + mu_t [ x - a ] - - eta_t = eta0 / (1 + lambda eta0 t) ^ 0.75 - mu_t = 1/max(1,t-t0) - -implements the ASGD algorithm as in L.Bottou's sgd-2.0 - -ARGS: - -- `opfunc` : a function that takes a single input (X), the point of - evaluation, and returns f(X) and df/dX -- `x` : the initial point -- `state` : a table describing the state of the optimizer; after each - call the state is modified -- `state.eta0` : learning rate -- `state.lambda` : decay term -- `state.alpha` : power for eta update -- `state.t0` : point at which to start averaging - -RETURN: -- `x` : the new x vector -- `f(x)` : the function, evaluated before the update -- `ax` : the averaged x vector - -(Clement Farabet, 2012) ---]] -function optim.asgd(opfunc, x, config, state) - -- (0) get/update state - local config = config or {} - local state = state or config - config.eta0 = config.eta0 or 1e-4 - config.lambda = config.lambda or 1e-4 - config.alpha = config.alpha or 0.75 - config.t0 = config.t0 or 1e6 - - -- (hidden state) - state.eta_t = state.eta_t or config.eta0 - state.mu_t = state.mu_t or 1 - state.t = state.t or 0 - - -- (1) evaluate f(x) and df/dx - local fx,dfdx = opfunc(x) - - -- (2) decay term - x:mul(1 -
config.lambda*state.eta_t) - - -- (3) update x - x:add(-state.eta_t, dfdx) - - -- (4) averaging - state.ax = state.ax or torch.Tensor():typeAs(x):resizeAs(x):zero() - state.tmp = state.tmp or torch.Tensor():typeAs(state.ax):resizeAs(state.ax) - if state.mu_t ~= 1 then - state.tmp:copy(x) - state.tmp:add(-1,state.ax):mul(state.mu_t) - state.ax:add(state.tmp) - else - state.ax:copy(x) - end - - -- (5) update eta_t and mu_t - state.t = state.t + 1 - state.eta_t = config.eta0 / math.pow((1 + config.lambda * config.eta0 * state.t), config.alpha) - state.mu_t = 1 / math.max(1, state.t - config.t0) - - -- return x*, f(x) before optimization, and average(x_t0,x_t1,x_t2,...) - return x,{fx},state.ax -end diff --git a/contrib/lua-torch/optim/cg.lua b/contrib/lua-torch/optim/cg.lua deleted file mode 100644 index 842a7d5690..0000000000 --- a/contrib/lua-torch/optim/cg.lua +++ /dev/null @@ -1,208 +0,0 @@ ---[[ - -This cg implementation is a rewrite of minimize.m written by Carl -E. Rasmussen. It is supposed to produce exactly the same results (give -or take numerical accuracy due to some changed order of -operations). You can compare the result on rosenbrock with minimize.m. -http://www.gatsby.ucl.ac.uk/~edward/code/minimize/example.html - - [x fx c] = minimize([0 0]', 'rosenbrock', -25) - -Note that we limit the number of function evaluations only, as that seems much -more important in practical use. - -ARGS: - -- `opfunc` : a function that takes a single input, the point of evaluation. -- `x` : the initial point -- `state` : a table of parameters and temporary allocations. -- `state.maxEval` : max number of function evaluations -- `state.maxIter` : max number of iterations -- `state.df[0,1,2,3]` : if you pass torch.Tensor they will be used for temp storage -- `state.[s,x0]` : if you pass torch.Tensor they will be used for temp storage - -RETURN: - -- `x*` : the new x vector, at the optimal point -- `f` : a table of all function values where - `f[1]` is the value of the function before any optimization and - `f[#f]` is the final fully optimized value, at x* - -(Koray Kavukcuoglu, 2012) ---]] -function optim.cg(opfunc, x, config, state) - -- parameters - local config = config or {} - local state = state or config - local rho = config.rho or 0.01 - local sig = config.sig or 0.5 - local int = config.int or 0.1 - local ext = config.ext or 3.0 - local maxIter = config.maxIter or 20 - local ratio = config.ratio or 100 - local maxEval = config.maxEval or maxIter*1.25 - local red = 1 - - local verbose = config.verbose or 0 - - local i = 0 - local ls_failed = 0 - local fx = {} - - -- we need three points for the interpolation/extrapolation stuff - local z1,z2,z3 = 0,0,0 - local d1,d2,d3 = 0,0,0 - local f1,f2,f3 = 0,0,0 - - local df1 = state.df1 or x.new() - local df2 = state.df2 or x.new() - local df3 = state.df3 or x.new() - local tdf - - df1:resizeAs(x) - df2:resizeAs(x) - df3:resizeAs(x) - - -- search direction - local s = state.s or x.new() - s:resizeAs(x) - - -- we need a temp storage for X - local x0 = state.x0 or x.new() - local f0 = 0 - local df0 = state.df0 or x.new() - x0:resizeAs(x) - df0:resizeAs(x) - - -- evaluate at initial point - f1,tdf = opfunc(x) - fx[#fx+1] = f1 - df1:copy(tdf) - i=i+1 - - -- initial search direction - s:copy(df1):mul(-1) - - d1 = -s:dot(s) -- slope - z1 = red/(1-d1) -- initial step - - while i < math.abs(maxEval) do - - x0:copy(x) - f0 = f1 - df0:copy(df1) - - x:add(z1,s) - f2,tdf = opfunc(x) - df2:copy(tdf) - i=i+1 - d2 = df2:dot(s) - f3,d3,z3 = f1,d1,-z1 -- init point 3 equal to
point 1 - local m = math.min(maxIter,maxEval-i) - local success = 0 - local limit = -1 - - while true do - while (f2 > f1+z1*rho*d1 or d2 > -sig*d1) and m > 0 do - limit = z1 - if f2 > f1 then - z2 = z3 - (0.5*d3*z3*z3)/(d3*z3+f2-f3) - else - local A = 6*(f2-f3)/z3+3*(d2+d3) - local B = 3*(f3-f2)-z3*(d3+2*d2) - z2 = (math.sqrt(B*B-A*d2*z3*z3)-B)/A - end - if z2 ~= z2 or z2 == math.huge or z2 == -math.huge then - z2 = z3/2; - end - z2 = math.max(math.min(z2, int*z3),(1-int)*z3); - z1 = z1 + z2; - x:add(z2,s) - f2,tdf = opfunc(x) - df2:copy(tdf) - i=i+1 - m = m - 1 - d2 = df2:dot(s) - z3 = z3-z2; - end - if f2 > f1+z1*rho*d1 or d2 > -sig*d1 then - break - elseif d2 > sig*d1 then - success = 1; - break; - elseif m == 0 then - break; - end - local A = 6*(f2-f3)/z3+3*(d2+d3); - local B = 3*(f3-f2)-z3*(d3+2*d2); - z2 = -d2*z3*z3/(B+math.sqrt(B*B-A*d2*z3*z3)) - - if z2 ~= z2 or z2 == math.huge or z2 == -math.huge or z2 < 0 then - if limit < -0.5 then - z2 = z1 * (ext -1) - else - z2 = (limit-z1)/2 - end - elseif (limit > -0.5) and (z2+z1) > limit then - z2 = (limit-z1)/2 - elseif limit < -0.5 and (z2+z1) > z1*ext then - z2 = z1*(ext-1) - elseif z2 < -z3*int then - z2 = -z3*int - elseif limit > -0.5 and z2 < (limit-z1)*(1-int) then - z2 = (limit-z1)*(1-int) - end - f3=f2; d3=d2; z3=-z2; - z1 = z1+z2; - x:add(z2,s) - - f2,tdf = opfunc(x) - df2:copy(tdf) - i=i+1 - m = m - 1 - d2 = df2:dot(s) - end - if success == 1 then - f1 = f2 - fx[#fx+1] = f1; - local ss = (df2:dot(df2)-df2:dot(df1)) / df1:dot(df1) - s:mul(ss) - s:add(-1,df2) - local tmp = df1:clone() - df1:copy(df2) - df2:copy(tmp) - d2 = df1:dot(s) - if d2> 0 then - s:copy(df1) - s:mul(-1) - d2 = -s:dot(s) - end - - z1 = z1 * math.min(ratio, d1/(d2-1e-320)) - d1 = d2 - ls_failed = 0 - else - x:copy(x0) - f1 = f0 - df1:copy(df0) - if ls_failed or i>maxEval then - break - end - local tmp = df1:clone() - df1:copy(df2) - df2:copy(tmp) - s:copy(df1) - s:mul(-1) - d1 = -s:dot(s) - z1 = 1/(1-d1) - ls_failed = 1 - end - end - state.df0 = df0 - state.df1 = df1 - state.df2 = df2 - state.df3 = df3 - state.x0 = x0 - state.s = s - return x,fx,i -end diff --git a/contrib/lua-torch/optim/checkgrad.lua b/contrib/lua-torch/optim/checkgrad.lua deleted file mode 100644 index 0382b21321..0000000000 --- a/contrib/lua-torch/optim/checkgrad.lua +++ /dev/null @@ -1,52 +0,0 @@ ---[[ An implementation of a simple numerical gradient checker. 
- -ARGS: - -- `opfunc` : a function that takes a single input (X), the point of - evaluation, and returns f(X) and df/dX -- `x` : the initial point -- `eps` : the epsilon to use for the numerical check (default is 1e-7) - -RETURN: - -- `diff` : error in the gradient, should be near tol -- `dC` : exact gradient at point -- `dC_est` : numerically estimates gradient at point - -]]-- - - --- function that numerically checks gradient of NCA loss: -function optim.checkgrad(opfunc, x, eps) - - -- compute true gradient: - local Corg,dC = opfunc(x) - dC:resize(x:size()) - - local Ctmp -- temporary value - local isTensor = torch.isTensor(Corg) - if isTensor then - Ctmp = Corg.new(Corg:size()) - end - - -- compute numeric approximations to gradient: - local eps = eps or 1e-7 - local dC_est = torch.Tensor():typeAs(dC):resizeAs(dC) - for i = 1,dC:size(1) do - local tmp = x[i] - x[i] = x[i] + eps - local C1 = opfunc(x) - if isTensor then - Ctmp:copy(C1) - C1 = Ctmp - end - x[i] = x[i] - 2 * eps - local C2 = opfunc(x) - x[i] = tmp - dC_est[i] = (C1 - C2) / (2 * eps) - end - - -- estimate error of gradient: - local diff = torch.norm(dC - dC_est) / torch.norm(dC + dC_est) - return diff,dC,dC_est -end diff --git a/contrib/lua-torch/optim/cmaes.lua b/contrib/lua-torch/optim/cmaes.lua deleted file mode 100644 index 74cd58a0ca..0000000000 --- a/contrib/lua-torch/optim/cmaes.lua +++ /dev/null @@ -1,270 +0,0 @@ -require 'torch' -require 'math' - -local BestSolution = {} ---[[ An implementation of `CMAES` (Covariance Matrix Adaptation Evolution Strategy), -ported from https://www.lri.fr/~hansen/barecmaes2.html. - -Parameters ----------- -ARGS: - -- `opfunc` : a function that takes a single input (X), the point of - evaluation, and returns f(X) and df/dX. Note that df/dX is not used -- `x` : the initial point -- `state.sigma` - float, initial step-size (standard deviation in each - coordinate) -- `state.maxEval` - int, maximal number of function evaluations -- `state.ftarget` - float, target function value -- `state.popsize` - population size. If this is left empty, - 4 + int(3 * log(|x|)) will be used -- `state.ftarget` - stop if fitness < ftarget -- `state.verb_disp` - int, display on console every verb_disp iteration, 0 for never - -RETURN: -- `x*` : the new `x` vector, at the optimal point -- `f` : a table of all function values: - `f[1]` is the value of the function before any optimization and - `f[#f]` is the final fully optimized value, at `x*` ---]] -function optim.cmaes(opfunc, x, config, state) - if (x.triu == nil or x.diag == nil) then - error('Unsupported Tensor ' .. x:type() .. 
" please use Float- or DoubleTensor for x") - end - -- process input parameters - local config = config or {} - local state = state or config - local xmean = x:clone():view(-1) -- distribution mean, a flattened copy - local N = xmean:size(1) -- number of objective variables/problem dimension - local sigma = state.sigma -- coordinate wise standard deviation (step size) - local ftarget = state.ftarget -- stop if fitness < ftarget - local maxEval = tonumber(state.maxEval) or 1e3*N^2 - local objfunc = opfunc - local verb_disp = state.verb_disp -- display step size - local min_iterations = state.min_iterations or 1 - - local lambda = state.popsize -- population size, offspring number - -- Strategy parameter setting: Selection - if state.popsize == nil then - lambda = 4 + math.floor(3 * math.log(N)) - end - - local mu = lambda / 2 -- number of parents/points for recombination - local weights = torch.range(0,mu-1):apply(function(i) - return math.log(mu+0.5) - math.log(i+1) end) -- recombination weights - weights:div(weights:sum()) -- normalize recombination weights array - local mueff = weights:sum()^2 / torch.pow(weights,2):sum() -- variance-effectiveness of sum w_i x_i - weights = weights:typeAs(x) - - -- Strategy parameter setting: Adaptation - local cc = (4 + mueff/N) / (N+4 + 2 * mueff/N) -- time constant for cumulation for C - local cs = (mueff + 2) / (N + mueff + 5) -- t-const for cumulation for sigma control - local c1 = 2 / ((N + 1.3)^2 + mueff) -- learning rate for rank-one update of C - local cmu = math.min(1 - c1, 2 * (mueff - 2 + 1/mueff) / ((N + 2)^2 + mueff)) -- and for rank-mu update - local damps = 2 * mueff/lambda + 0.3 + cs -- damping for sigma, usually close to 1 - - -- Initialize dynamic (internal) state variables - local pc = torch.Tensor(N):zero():typeAs(x) -- evolution paths for C - local ps = torch.Tensor(N):zero():typeAs(x) -- evolution paths for sigma - local B = torch.eye(N):typeAs(x) -- B defines the coordinate system - local D = torch.Tensor(N):fill(1):typeAs(x) -- diagonal D defines the scaling - local C = torch.eye(N):typeAs(x) -- covariance matrix - if not pcall(function () torch.symeig(C,'V') end) then -- if error occurs trying to use symeig - error('torch.symeig not available for ' .. x:type() .. 
- " please use Float- or DoubleTensor for x") - end - local candidates = torch.Tensor(lambda,N):typeAs(x) - local invsqrtC = torch.eye(N):typeAs(x) -- C^-1/2 - local eigeneval = 0 -- tracking the update of B and D - local counteval = 0 - local f_hist = {[1]=opfunc(x)} -- for bookkeeping output and termination - local fitvals = torch.Tensor(lambda)-- fitness values - local best = BestSolution.new(nil,nil,counteval) - local iteration = 0 -- iteration of the optimize loop - - - local function ask() - --[[return a list of lambda candidate solutions according to - m + sig * Normal(0,C) = m + sig * B * D * Normal(0,I) - --]] - -- Eigendecomposition: first update B, D and invsqrtC from C - -- postpone in case to achieve O(N^2) - if counteval - eigeneval > lambda/(c1+cmu)/C:size(1)/10 then - eigeneval = counteval - C = torch.triu(C) + torch.triu(C,1):t() -- enforce symmetry - D, B = torch.symeig(C,'V') -- eigen decomposition, B==normalized eigenvectors, O(N^3) - D = torch.sqrt(D) -- D contains standard deviations now - invsqrtC = (B * torch.diag(torch.pow(D,-1)) * B:t()) - end - for k=1,lambda do --repeat lambda times - local z = D:clone():normal(0,1):cmul(D) - candidates[{k,{}}] = torch.add(xmean, (B * z) * sigma) - end - - return candidates - end - - - local function tell(arx) - --[[update the evolution paths and the distribution parameters m, - sigma, and C within CMA-ES. - - Parameters - ---------- - `arx` - a list of solutions, presumably from `ask()` - `fitvals` - the corresponding objective function values --]] - -- bookkeeping, preparation - counteval = counteval + lambda -- slightly artificial to do here - local xold = xmean:clone() - - -- Sort by fitness and compute weighted mean into xmean - local arindex = nil --sorted indices - fitvals, arindex = torch.sort(fitvals) - arx = arx:index(1, arindex[{{1, mu}}]) -- sorted candidate solutions - - table.insert(f_hist, fitvals[1]) --append best fitness to history - best:update(arx[1], fitvals[1], counteval) - - xmean:zero() - xmean:addmv(arx:t(), weights) --dot product - - -- Cumulation: update evolution paths - local y = xmean - xold - local z = invsqrtC * y -- == C^(-1/2) * (xnew - xold) - - local c = (cs * (2-cs) * mueff)^0.5 / sigma - ps = ps - ps * cs + z * c -- exponential decay on ps - local hsig = (torch.sum(torch.pow(ps,2)) / - (1-(1-cs)^(2*counteval/lambda)) / N < 2 + 4./(N+1)) - hsig = hsig and 1.0 or 0.0 --use binary numbers - - c = (cc * (2-cc) * mueff)^0.5 / sigma - pc = pc - pc * cc + y * c * hsig -- exponential decay on pc - - -- Adapt covariance matrix C - local c1a = c1 - (1-hsig^2) * c1 * cc * (2-cc) - -- for a minor adjustment to the variance loss by hsig - for i=1,N do - for j=1,N do - local r = torch.range(1,mu) - r:apply(function(k) - return weights[k] * (arx[k][i]-xold[i]) * (arx[k][j]-xold[j]) end) - local Cmuij = torch.sum(r) / sigma^2 -- rank-mu update - C[i][j] = C[i][j] + ((-c1a - cmu) * C[i][j] + - c1 * pc[i]*pc[j] + cmu * Cmuij) - end - end - - -- Adapt step-size sigma with factor <= exp(0.6) \approx 1.82 - sigma = sigma * math.exp(math.min(0.6, - (cs / damps) * (torch.sum(torch.pow(ps,2))/N - 1)/2)) - end - - local function stop() - --[[return satisfied termination conditions in a table like - {'termination reason':value, ...}, for example {'tolfun':1e-12}, - or the empty table {}--]] - local res = {} - if counteval > 0 then - if counteval >= maxEval then - res['evals'] = maxEval - end - if ftarget ~= nil and fitvals:nElement() > 0 and fitvals[1] <= ftarget then - res['ftarget'] = ftarget - end - if torch.max(D) 
> 1e7 * torch.min(D) then - res['condition'] = 1e7 - end - if fitvals:nElement() > 1 and fitvals[fitvals:nElement()] - fitvals[1] < 1e-12 then - res['tolfun'] = 1e-12 - end - if sigma * torch.max(D) < 1e-11 then - -- remark: max(D) >= max(diag(C))^0.5 - res['tolx'] = 1e-11 - end - end - return res - end - - local function disp(verb_modulo) - --[[display some iteration info--]] - if verb_disp == 0 then - return nil - end - local iteration = counteval / lambda - - if iteration == 1 or iteration % (10*verb_modulo) == 0 then - print('evals:\t ax-ratio max(std) f-value') - end - if iteration <= 2 or iteration % verb_modulo == 0 then - local max_std = math.sqrt(torch.max(torch.diag(C))) - print(tostring(counteval).. ': ' .. - string.format(' %6.1f %8.1e ', torch.max(D) / torch.min(D), sigma * max_std) - .. tostring(fitvals[1])) - end - - return nil - end - - while next(stop()) == nil or iteration < min_iterations do - iteration = iteration + 1 - - local X = ask() -- deliver candidate solutions - for i=1, lambda do - -- put candidate tensor back in input shape and evaluate in opfunc - local candidate = X[i]:viewAs(x) - fitvals[i] = objfunc(candidate) - end - - tell(X) - disp(verb_disp) - end - - local bestmu, f, c = best:get() - if verb_disp > 0 then - for k, v in pairs(stop()) do - print('termination by', k, '=', v) - end - print('best f-value =', f) - print('solution = ') - print(bestmu) - print('best found at iteration: ', c/lambda, ' , total iterations: ', iteration) - end - table.insert(f_hist, f) - - return bestmu, f_hist, counteval -end - - - -BestSolution.__index = BestSolution -function BestSolution.new(x, f, evals) - local self = setmetatable({}, BestSolution) - self.x = x - self.f = f - self.evals = evals - return self -end - -function BestSolution.update(self, arx, arf, evals) - --[[initialize the best solution with `x`, `f`, and `evals`. - Better solutions have smaller `f`-values.--]] - if self.f == nil or arf < self.f then - self.x = arx:clone() - self.f = arf - self.evals = evals - end - return self -end - -function BestSolution.get(self) - return self.x, self.f, self.evals -end diff --git a/contrib/lua-torch/optim/de.lua b/contrib/lua-torch/optim/de.lua deleted file mode 100644 index 1e8e8001d5..0000000000 --- a/contrib/lua-torch/optim/de.lua +++ /dev/null @@ -1,109 +0,0 @@ ---[[ An implementation of `DE` (Differential Evolution), - -ARGS: - - -`opfunc` : a function that takes a single input (X), the point of - evaluation, and returns f(X) and df/dX. Note that df/dX is not used - -`x` : the initial point - -`state.popsize`: population size. 
If this is left empty, 10*d will be used
-`state.scaleFactor`: float, usually between 0.4 and 1
-`state.crossoverRate`: float, usually between 0.1 and 0.9
-`state.maxFEs`: int, maximal number of function evaluations
-
-RETURN:
-   - `x*` : the new `x` vector, at the optimal point
-   - `f`  : a table of all function values:
-            `f[1]` is the value of the function before any optimization and
-            `f[#f]` is the final fully optimized value, at `x*`
-]]
-
-require 'torch'
-
-function optim.de(opfunc, x, config, state)
-   -- process input parameters
-   local config = config or {}
-   local state = state
-   local popsize = config.popsize -- population size
-   local scaleFactor = config.scaleFactor -- scale factor
-   local crossoverRate = config.crossoverRate -- crossover rate
-   local maxFEs = tonumber(config.maxFEs) -- maximal number of function evaluations
-   local maxRegion = config.maxRegion -- upper bound of search region
-   local minRegion = config.minRegion -- lower bound of search region
-   local xmean = x:clone():view(-1) -- distribution mean, a flattened copy
-   local D = xmean:size(1) -- number of objective variables/problem dimension
-
-   if config.popsize == nil then
-      popsize = 10 * D
-   end
-   if config.maxRegion == nil then
-      maxRegion = 30
-   end
-   if config.minRegion == nil then
-      minRegion = -30
-   end
-
-   -- Initialize population
-   local fx = x.new(maxFEs)
-   local pop = x.new(popsize, D)
-   local children = x.new(popsize, D)
-   local fitness = x.new(popsize)
-   local children_fitness = x.new(popsize)
-   local fes = 1 -- number of function evaluations
-   local best_fitness
-   local best_solution = x.new(D)
-
-   -- Initialize the population and evaluate its fitness values
-   local gen = torch.Generator()
-   torch.manualSeed(gen, 1)
-
-   pop:uniform(gen, minRegion, maxRegion)
-   for i = 1, popsize do
-      fitness[i] = opfunc(pop[i])
-      fx[fes] = fitness[i]
-      fes = fes + 1
-   end
-
-   -- Find the best (lowest-fitness) solution
-   local index
-   best_fitness, index = fitness:min(1)
-   best_fitness = best_fitness[1]
-   index = index[1]
-   best_solution:copy(pop[index])
-
-   -- Main loop
-   while fes < maxFEs do
-      local r1, r2
-      for i = 1, popsize do
-         repeat
-            r1 = torch.random(gen, 1, popsize)
-         until(r1 ~= i)
-         repeat
-            r2 = torch.random(gen, 1, popsize)
-         until(r2 ~= r1 and r2 ~= i)
-
-         local jrand = torch.random(gen, 1, D)
-         for j = 1, D do
-            if torch.uniform(gen, 0, 1) < crossoverRate or i == jrand then
-               children[i][j] = best_solution[j] + scaleFactor * (pop[r1][j] - pop[r2][j])
-            else
-               children[i][j] = pop[i][j]
-            end
-         end
-         children_fitness[i] = opfunc(children[i])
-         fx[fes] = children_fitness[i]
-         fes = fes + 1
-      end
-
-      for i = 1, popsize do
-         if children_fitness[i] <= fitness[i] then
-            pop[i]:copy(children[i])
-            fitness[i] = children_fitness[i]
-            if fitness[i] < best_fitness then
-               best_fitness = fitness[i]
-               best_solution:copy(children[i])
-            end
-         end
-      end
-   end
-   return best_solution, fx
-end
diff --git a/contrib/lua-torch/optim/fista.lua b/contrib/lua-torch/optim/fista.lua
deleted file mode 100644
index c8c6f5e43f..0000000000
--- a/contrib/lua-torch/optim/fista.lua
+++ /dev/null
@@ -1,192 +0,0 @@
---[[ FISTA with backtracking line search
-
-- `f` : smooth function
-- `g` : non-smooth function
-- `pl` : minimizer of intermediate problem Q(x,y)
-- `xinit` : initial point
-- `params` : table of parameters (**optional**)
-- `params.L` : 1/(step size) for ISTA/FISTA iteration (0.1)
-- `params.Lstep` : step size multiplier at each iteration (1.5)
-- `params.maxiter` : max number of iterations (50)
-- `params.maxline` : max number of line search iterations per iteration (20)
-- `params.errthres`: Error threshold for convergence check (1e-4)
-- `params.doFistaUpdate` : true: use FISTA, false: use ISTA (true)
-- `params.verbose` : store each iteration solution and print detailed info (false)
-
-On output, `params` will contain these additional fields that can be reused.
-
-- `params.L` : last used L value will be written.
-
-These are temporary storages needed by the algorithm, and if the same params
-object is passed a second time, the same storages will be reused without new
-allocation.
-
-- `params.xkm` : previous iteration point
-- `params.y` : fista iteration
-- `params.ply` : ply = pl(y - 1/L grad(f))
-
-Returns the solution x and a history of {function evals, number of line searches, ...}
-
-Algorithm is published in
-
-   @article{beck-fista-09,
-      Author = {Beck, Amir and Teboulle, Marc},
-      Journal = {SIAM J. Img. Sci.},
-      Number = {1},
-      Pages = {183--202},
-      Title = {A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse Problems},
-      Volume = {2},
-      Year = {2009}}
-]]
-function optim.FistaLS(f, g, pl, xinit, params)
-
-   local params = params or {}
-   local L = params.L or 0.1
-   local Lstep = params.Lstep or 1.5
-   local maxiter = params.maxiter or 50
-   local maxline = params.maxline or 20
-   local errthres = params.errthres or 1e-4
-   local doFistaUpdate = params.doFistaUpdate
-   local verbose = params.verbose
-
-   -- temporary allocations
-   params.xkm = params.xkm or torch.Tensor()
-   params.y = params.y or torch.Tensor()
-   params.ply = params.ply or torch.Tensor()
-   local xkm = params.xkm -- previous iteration
-   local y = params.y -- fista iteration
-   local ply = params.ply -- soft-shrunk y
-
-   -- we start from all zeros
-   local xk = xinit
-   xkm:resizeAs(xk):zero()
-   ply:resizeAs(xk):zero()
-   y:resizeAs(xk):zero()
-
-   local history = {} -- keep track of stuff
-   local niter = 0 -- number of iterations done
-   local converged = false -- are we done?
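-
-   -- FISTA momentum bookkeeping: tk is t(k) and tkp is t(k+1) in the update
-   -- t(k+1) = (1 + sqrt(1 + 4*t(k)^2))/2 of Beck & Teboulle (2009), used below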
- local tk = 1 -- momentum param for FISTA - local tkp = 0 - - - local gy = g(y) - local fval = math.huge -- fval = f+g - while not converged and niter < maxiter do - - -- run through smooth function (code is input, input is target) - -- get derivatives from smooth function - local fy,gfy = f(y,'dx') - --local gfy = f(y) - - local fply = 0 - local gply = 0 - local Q = 0 - - ---------------------------------------------- - -- do line search to find new current location starting from fista loc - local nline = 0 - local linesearchdone = false - while not linesearchdone do - -- take a step in gradient direction of smooth function - ply:copy(y) - ply:add(-1/L,gfy) - - -- and solve for minimum of auxiliary problem - pl(ply,L) - -- this is candidate for new current iteration - xk:copy(ply) - - -- evaluate this point F(ply) - fply = f(ply) - - -- ply - y - ply:add(-1, y) - -- - local Q2 = gfy:dot(ply) - -- L/2 ||beta-y||^2 - local Q3 = L/2 * ply:dot(ply) - -- Q(beta,y) = F(y) + + L/2||beta-y||^2 + G(beta) - Q = fy + Q2 + Q3 - - if verbose then - print(string.format('nline=%d L=%g fply=%g Q=%g fy=%g Q2=%g Q3=%g',nline,L,fply,Q,fy,Q2,Q3)) - end - -- check if F(beta) < Q(pl(y),\t) - if fply <= Q then --and Fply + Gply <= F then - -- now evaluate G here - linesearchdone = true - elseif nline >= maxline then - linesearchdone = true - xk:copy(xkm) -- if we can't find a better point, current iter = previous iter - --print('oops') - else - L = L * Lstep - end - nline = nline + 1 - end - -- end line search - --------------------------------------------- - - --------------------------------------------- - -- FISTA - --------------------------------------------- - if doFistaUpdate then - -- do the FISTA step - tkp = (1 + math.sqrt(1 + 4*tk*tk)) / 2 - -- x(k-1) = x(k-1) - x(k) - xkm:add(-1,xk) - -- y(k+1) = x(k) + (1-t(k)/t(k+1))*(x(k-1)-x(k)) - y:copy(xk) - y:add( (1-tk)/tkp , xkm) - -- store for next iterations - -- x(k-1) = x(k) - xkm:copy(xk) - else - y:copy(xk) - end - -- t(k) = t(k+1) - tk = tkp - fply = f(y) - gply = g(y) - if verbose then - print(string.format('iter=%d eold=%g enew=%g',niter,fval,fply+gply)) - end - - niter = niter + 1 - - -- bookeeping - fval = fply + gply - history[niter] = {} - history[niter].nline = nline - history[niter].L = L - history[niter].F = fval - history[niter].Fply = fply - history[niter].Gply = gply - history[niter].Q = Q - params.L = L - if verbose then - history[niter].xk = xk:clone() - history[niter].y = y:clone() - end - - -- are we done? 
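-      -- (converged when two successive objective values differ by less than errthres)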
- if niter > 1 and math.abs(history[niter].F - history[niter-1].F) <= errthres then - converged = true - xinit:copy(y) - return y,history - end - - if niter >= maxiter then - xinit:copy(y) - return y,history - end - - --if niter > 1 and history[niter].F > history[niter-1].F then - --print(niter, 'This was supposed to be a convex function, we are going up') - --converged = true - --return xk,history - --end - end - error('not supposed to be here') -end - diff --git a/contrib/lua-torch/optim/init.lua b/contrib/lua-torch/optim/init.lua deleted file mode 100644 index a045bd8a26..0000000000 --- a/contrib/lua-torch/optim/init.lua +++ /dev/null @@ -1,33 +0,0 @@ - -require 'torch' - -optim = {} - --- optimizations -require('optim.sgd') -require('optim.cg') -require('optim.asgd') -require('optim.nag') -require('optim.fista') -require('optim.lbfgs') -require('optim.adagrad') -require('optim.rprop') -require('optim.adam') -require('optim.adamax') -require('optim.rmsprop') -require('optim.adadelta') -require('optim.cmaes') -require('optim.de') - --- line search functions -require('optim.lswolfe') - --- helpers -require('optim.polyinterp') -require('optim.checkgrad') - --- tools -require('optim.ConfusionMatrix') -require('optim.Logger') - -return optim diff --git a/contrib/lua-torch/optim/lbfgs.lua b/contrib/lua-torch/optim/lbfgs.lua deleted file mode 100644 index d850fcbb33..0000000000 --- a/contrib/lua-torch/optim/lbfgs.lua +++ /dev/null @@ -1,268 +0,0 @@ ---[[ An implementation of L-BFGS, heavily inspired by minFunc (Mark Schmidt) - -This implementation of L-BFGS relies on a user-provided line -search function (state.lineSearch). If this function is not -provided, then a simple learningRate is used to produce fixed -size steps. Fixed size steps are much less costly than line -searches, and can be useful for stochastic problems. - -The learning rate is used even when a line search is provided. -This is also useful for large-scale stochastic problems, where -opfunc is a noisy approximation of f(x). In that case, the learning -rate allows a reduction of confidence in the step size. 
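-
-A typical deterministic (full-batch) call might look like this (an
-illustrative sketch, not part of this module: `opfunc` stands for any
-objective returning f(X) and df/dX):
-
-   local state = {maxIter = 100, nCorrection = 10}
-   local x = torch.randn(100)
-   local xstar, fhist = optim.lbfgs(opfunc, x, state)
-   print(('f went from %g to %g'):format(fhist[1], fhist[#fhist]))
-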
- -ARGS: - -- `opfunc` : a function that takes a single input (X), the point of - evaluation, and returns f(X) and df/dX -- `x` : the initial point -- `state` : a table describing the state of the optimizer; after each - call the state is modified -- `state.maxIter` : Maximum number of iterations allowed -- `state.maxEval` : Maximum number of function evaluations -- `state.tolFun` : Termination tolerance on the first-order optimality -- `state.tolX` : Termination tol on progress in terms of func/param changes -- `state.lineSearch` : A line search function -- `state.learningRate` : If no line search provided, then a fixed step size is used - -RETURN: -- `x*` : the new `x` vector, at the optimal point -- `f` : a table of all function values: - `f[1]` is the value of the function before any optimization and - `f[#f]` is the final fully optimized value, at `x*` - -(Clement Farabet, 2012) -]] -function optim.lbfgs(opfunc, x, config, state) - -- get/update state - local config = config or {} - local state = state or config - local maxIter = tonumber(config.maxIter) or 20 - local maxEval = tonumber(config.maxEval) or maxIter*1.25 - local tolFun = config.tolFun or 1e-5 - local tolX = config.tolX or 1e-9 - local nCorrection = config.nCorrection or 100 - local lineSearch = config.lineSearch - local lineSearchOpts = config.lineSearchOptions - local learningRate = config.learningRate or 1 - local isverbose = config.verbose or false - - state.funcEval = state.funcEval or 0 - state.nIter = state.nIter or 0 - - -- verbose function - local verbose - if isverbose then - verbose = function(...) print(' ', ...) end - else - verbose = function() end - end - - -- import some functions - local abs = math.abs - local min = math.min - - -- evaluate initial f(x) and df/dx - local f,g = opfunc(x) - local f_hist = {f} - local currentFuncEval = 1 - state.funcEval = state.funcEval + 1 - local p = g:size(1) - - -- check optimality of initial point - state.tmp1 = state.tmp1 or g.new(g:size()):zero(); local tmp1 = state.tmp1 - tmp1:copy(g):abs() - if tmp1:sum() <= tolFun then - -- optimality condition below tolFun - verbose('optimality condition below tolFun') - return x,f_hist - end - - if not state.dir_bufs then - -- reusable buffers for y's and s's, and their histories - verbose('creating recyclable direction/step/history buffers') - state.dir_bufs = state.dir_bufs or g.new(nCorrection+1, p):split(1) - state.stp_bufs = state.stp_bufs or g.new(nCorrection+1, p):split(1) - for i=1,#state.dir_bufs do - state.dir_bufs[i] = state.dir_bufs[i]:squeeze(1) - state.stp_bufs[i] = state.stp_bufs[i]:squeeze(1) - end - end - - -- variables cached in state (for tracing) - local d = state.d - local t = state.t - local old_dirs = state.old_dirs - local old_stps = state.old_stps - local Hdiag = state.Hdiag - local g_old = state.g_old - local f_old = state.f_old - - -- optimize for a max of maxIter iterations - local nIter = 0 - while nIter < maxIter do - -- keep track of nb of iterations - nIter = nIter + 1 - state.nIter = state.nIter + 1 - - ------------------------------------------------------------ - -- compute gradient descent direction - ------------------------------------------------------------ - if state.nIter == 1 then - d = g:clone():mul(-1) -- -g - old_dirs = {} - old_stps = {} - Hdiag = 1 - else - -- do lbfgs update (update memory) - local y = table.remove(state.dir_bufs) -- pop - local s = table.remove(state.stp_bufs) - y:add(g, -1, g_old) -- g - g_old - s:mul(d, t) -- d*t - local ys = y:dot(s) -- y*s - if ys > 1e-10 
then - -- updating memory - if #old_dirs == nCorrection then - -- shift history by one (limited-memory) - local removed1 = table.remove(old_dirs, 1) - local removed2 = table.remove(old_stps, 1) - table.insert(state.dir_bufs, removed1) - table.insert(state.stp_bufs, removed2) - end - - -- store new direction/step - table.insert(old_dirs, s) - table.insert(old_stps, y) - - -- update scale of initial Hessian approximation - Hdiag = ys / y:dot(y) -- (y*y) - else - -- put y and s back into the buffer pool - table.insert(state.dir_bufs, y) - table.insert(state.stp_bufs, s) - end - - -- compute the approximate (L-BFGS) inverse Hessian - -- multiplied by the gradient - local k = #old_dirs - - -- need to be accessed element-by-element, so don't re-type tensor: - state.ro = state.ro or torch.Tensor(nCorrection); local ro = state.ro - for i = 1,k do - ro[i] = 1 / old_stps[i]:dot(old_dirs[i]) - end - - -- iteration in L-BFGS loop collapsed to use just one buffer - local q = tmp1 -- reuse tmp1 for the q buffer - -- need to be accessed element-by-element, so don't re-type tensor: - state.al = state.al or torch.zeros(nCorrection) local al = state.al - - q:mul(g, -1) -- -g - for i = k,1,-1 do - al[i] = old_dirs[i]:dot(q) * ro[i] - q:add(-al[i], old_stps[i]) - end - - -- multiply by initial Hessian - r = d -- share the same buffer, since we don't need the old d - r:mul(q, Hdiag) -- q[1] * Hdiag - for i = 1,k do - local be_i = old_stps[i]:dot(r) * ro[i] - r:add(al[i]-be_i, old_dirs[i]) - end - -- final direction is in r/d (same object) - end - g_old = g_old or g:clone() - g_old:copy(g) - f_old = f - - ------------------------------------------------------------ - -- compute step length - ------------------------------------------------------------ - -- directional derivative - local gtd = g:dot(d) -- g * d - - -- check that progress can be made along that direction - if gtd > -tolX then - break - end - - -- reset initial guess for step size - if state.nIter == 1 then - tmp1:copy(g):abs() - t = min(1,1/tmp1:sum()) * learningRate - else - t = learningRate - end - - -- optional line search: user function - local lsFuncEval = 0 - if lineSearch and type(lineSearch) == 'function' then - -- perform line search, using user function - f,g,x,t,lsFuncEval = lineSearch(opfunc,x,t,d,f,g,gtd,lineSearchOpts) - table.insert(f_hist, f) - else - -- no line search, simply move with fixed-step - x:add(t,d) - if nIter ~= maxIter then - -- re-evaluate function only if not in last iteration - -- the reason we do this: in a stochastic setting, - -- no use to re-evaluate that function here - f,g = opfunc(x) - lsFuncEval = 1 - table.insert(f_hist, f) - end - end - - -- update func eval - currentFuncEval = currentFuncEval + lsFuncEval - state.funcEval = state.funcEval + lsFuncEval - - ------------------------------------------------------------ - -- check conditions - ------------------------------------------------------------ - if nIter == maxIter then - -- no use to run tests - verbose('reached max number of iterations') - break - end - - if currentFuncEval >= maxEval then - -- max nb of function evals - verbose('max nb of function evals') - break - end - - tmp1:copy(g):abs() - if tmp1:sum() <= tolFun then - -- check optimality - verbose('optimality condition below tolFun') - break - end - - tmp1:copy(d):mul(t):abs() - if tmp1:sum() <= tolX then - -- step size below tolX - verbose('step size below tolX') - break - end - - if abs(f-f_old) < tolX then - -- function value changing less than tolX - verbose('function value changing 
less than tolX') - break - end - end - - -- save state - state.old_dirs = old_dirs - state.old_stps = old_stps - state.Hdiag = Hdiag - state.g_old = g_old - state.f_old = f_old - state.t = t - state.d = d - - -- return optimal x, and history of f(x) - return x,f_hist,currentFuncEval -end diff --git a/contrib/lua-torch/optim/lswolfe.lua b/contrib/lua-torch/optim/lswolfe.lua deleted file mode 100644 index 0afbdbe8b2..0000000000 --- a/contrib/lua-torch/optim/lswolfe.lua +++ /dev/null @@ -1,192 +0,0 @@ ---[[ A Line Search satisfying the Wolfe conditions - -ARGS: -- `opfunc` : a function (the objective) that takes a single input (X), - the point of evaluation, and returns f(X) and df/dX -- `x` : initial point / starting location -- `t` : initial step size -- `d` : descent direction -- `f` : initial function value -- `g` : gradient at initial location -- `gtd` : directional derivative at starting location -- `options.c1` : sufficient decrease parameter -- `options.c2` : curvature parameter -- `options.tolX` : minimum allowable step length -- `options.maxIter` : maximum nb of iterations - -RETURN: -- `f` : function value at x+t*d -- `g` : gradient value at x+t*d -- `x` : the next x (=x+t*d) -- `t` : the step length -- `lsFuncEval` : the number of function evaluations -]] -function optim.lswolfe(opfunc,x,t,d,f,g,gtd,options) - -- options - options = options or {} - local c1 = options.c1 or 1e-4 - local c2 = options.c2 or 0.9 - local tolX = options.tolX or 1e-9 - local maxIter = options.maxIter or 20 - local isverbose = options.verbose or false - - -- some shortcuts - local abs = torch.abs - local min = math.min - local max = math.max - - -- verbose function - local function verbose(...) - if isverbose then print(' ', ...) end - end - - -- evaluate objective and gradient using initial step - local x_init = x:clone() - x:add(t,d) - local f_new,g_new = opfunc(x) - local lsFuncEval = 1 - local gtd_new = g_new * d - - -- bracket an interval containing a point satisfying the Wolfe - -- criteria - local LSiter,t_prev,done = 0,0,false - local f_prev,g_prev,gtd_prev = f,g:clone(),gtd - local bracket,bracketFval,bracketGval - while LSiter < maxIter do - -- check conditions: - if (f_new > (f + c1*t*gtd)) or (LSiter > 1 and f_new >= f_prev) then - bracket = x.new{t_prev,t} - bracketFval = x.new{f_prev,f_new} - bracketGval = x.new(2,g_new:size(1)) - bracketGval[1] = g_prev - bracketGval[2] = g_new - break - - elseif abs(gtd_new) <= -c2*gtd then - bracket = x.new{t} - bracketFval = x.new{f_new} - bracketGval = x.new(1,g_new:size(1)) - bracketGval[1] = g_new - done = true - break - - elseif gtd_new >= 0 then - bracket = x.new{t_prev,t} - bracketFval = x.new{f_prev,f_new} - bracketGval = x.new(2,g_new:size(1)) - bracketGval[1] = g_prev - bracketGval[2] = g_new - break - - end - - -- interpolate: - local tmp = t_prev - t_prev = t - local minStep = t + 0.01*(t-tmp) - local maxStep = t*10 - t = optim.polyinterp(x.new{{tmp,f_prev,gtd_prev}, - {t,f_new,gtd_new}}, - minStep, maxStep) - - -- next step: - f_prev = f_new - g_prev = g_new:clone() - gtd_prev = gtd_new - x[{}] = x_init - x:add(t,d) - f_new,g_new = opfunc(x) - lsFuncEval = lsFuncEval + 1 - gtd_new = g_new * d - LSiter = LSiter + 1 - end - - -- reached max nb of iterations? - if LSiter == maxIter then - bracket = x.new{0,t} - bracketFval = x.new{f,f_new} - bracketGval = x.new(2,g_new:size(1)) - bracketGval[1] = g - bracketGval[2] = g_new - end - - -- zoom phase: we now have a point satisfying the criteria, or - -- a bracket around it. 
We refine the bracket until we find the
-   -- exact point satisfying the criteria
-   local insufProgress = false
-   local LOposRemoved = 0
-   while not done and LSiter < maxIter do
-      -- find high and low points in bracket
-      local f_LO,LOpos = bracketFval:min(1)
-      LOpos = LOpos[1] f_LO = f_LO[1]
-      local HIpos = -LOpos+3
-
-      -- compute new trial value
-      t = optim.polyinterp(x.new{{bracket[1],bracketFval[1],bracketGval[1]*d},
-                                 {bracket[2],bracketFval[2],bracketGval[2]*d}})
-
-      -- test that we are making sufficient progress
-      if min(bracket:max()-t,t-bracket:min())/(bracket:max()-bracket:min()) < 0.1 then
-         if insufProgress or t >= bracket:max() or t <= bracket:min() then
-            if abs(t-bracket:max()) < abs(t-bracket:min()) then
-               t = bracket:max()-0.1*(bracket:max()-bracket:min())
-            else
-               t = bracket:min()+0.1*(bracket:max()-bracket:min())
-            end
-            insufProgress = false
-         else
-            insufProgress = true
-         end
-      else
-         insufProgress = false
-      end
-
-      -- Evaluate new point
-      x[{}] = x_init
-      x:add(t,d)
-      f_new,g_new = opfunc(x)
-      lsFuncEval = lsFuncEval + 1
-      gtd_new = g_new * d
-      LSiter = LSiter + 1
-      if f_new > f + c1*t*gtd or f_new >= f_LO then
-         -- Armijo condition not satisfied or not lower than lowest point
-         bracket[HIpos] = t
-         bracketFval[HIpos] = f_new
-         bracketGval[HIpos] = g_new
-      else
-         if abs(gtd_new) <= -c2*gtd then
-            -- Wolfe conditions satisfied
-            done = true
-         elseif gtd_new*(bracket[HIpos]-bracket[LOpos]) >= 0 then
-            -- Old HI becomes new LO
-            bracket[HIpos] = bracket[LOpos]
-            bracketFval[HIpos] = bracketFval[LOpos]
-            bracketGval[HIpos] = bracketGval[LOpos]
-         end
-         -- New point becomes new LO
-         bracket[LOpos] = t
-         bracketFval[LOpos] = f_new
-         bracketGval[LOpos] = g_new
-      end
-
-      -- done?
-      if not done and abs((bracket[1]-bracket[2])*gtd_new) < tolX then
-         break
-      end
-   end
-
-   -- be verbose
-   if LSiter == maxIter then
-      verbose('reached max number of iterations')
-   end
-
-   -- return stuff
-   local _,LOpos = bracketFval:min(1)
-   LOpos = LOpos[1]
-   t = bracket[LOpos]
-   f_new = bracketFval[LOpos]
-   g_new = bracketGval[LOpos]
-   x[{}] = x_init
-   x:add(t,d)
-   return f_new,g_new,x,t,lsFuncEval
-end
diff --git a/contrib/lua-torch/optim/nag.lua b/contrib/lua-torch/optim/nag.lua
deleted file mode 100644
index 875d81e4c8..0000000000
--- a/contrib/lua-torch/optim/nag.lua
+++ /dev/null
@@ -1,86 +0,0 @@
-----------------------------------------------------------------------
--- An implementation of SGD adapted with features of Nesterov's
--- Accelerated Gradient method, based on the paper
--- On the Importance of Initialization and Momentum in Deep Learning
--- Sutskever et al., ICML 2013
--
--- ARGS:
--- opfunc : a function that takes a single input (X), the point of
---          evaluation, and returns f(X) and df/dX
--- x      : the initial point
--- state  : a table describing the state of the optimizer; after each
---          call the state is modified
--- state.learningRate      : learning rate
--- state.learningRateDecay : learning rate decay
--- state.weightDecay       : weight decay
--- state.momentum          : momentum
--- state.dampening         : dampening for momentum
--- state.learningRates     : vector of individual learning rates
---
--- RETURN:
--- x     : the new x vector
--- f(x)  : the function, evaluated before the update
---
--- (Dilip Krishnan, 2013)
--
-
-function optim.nag(opfunc, x, config, state)
-   -- (0) get/update state
-   local config = config or {}
-   local state = state or config
-   local lr = config.learningRate or 1e-3
-   local lrd = config.learningRateDecay or 0
-   local wd = config.weightDecay or 0
-   local mom = config.momentum or 0.9
-   local damp = config.dampening or mom
-   local lrs = config.learningRates
-   state.evalCounter = state.evalCounter or 0
-   local nevals = state.evalCounter
-
-   if mom <= 0 then
-      error('Momentum must be positive for Nesterov Accelerated Gradient')
-   end
-
-   -- (1) evaluate f(x) and df/dx
-   -- first step in the direction of the momentum vector
-
-   if state.dfdx then
-      x:add(mom, state.dfdx)
-   end
-   -- then compute gradient at that point
-   -- comment out the above line to get the original SGD
-   local fx,dfdx = opfunc(x)
-
-   -- (2) weight decay
-   if wd ~= 0 then
-      dfdx:add(wd, x)
-   end
-
-   -- (3) learning rate decay (annealing)
-   local clr = lr / (1 + nevals*lrd)
-
-   -- (4) apply momentum
-   if not state.dfdx then
-      state.dfdx = torch.Tensor():typeAs(dfdx):resizeAs(dfdx):fill(0)
-   else
-      state.dfdx:mul(mom)
-   end
-
-   -- (5) parameter update with single or individual learning rates
-   if lrs then
-      if not state.deltaParameters then
-         state.deltaParameters = torch.Tensor():typeAs(x):resizeAs(dfdx)
-      end
-      state.deltaParameters:copy(lrs):cmul(dfdx)
-      x:add(-clr, state.deltaParameters)
-      state.dfdx:add(-clr, state.deltaParameters)
-   else
-      x:add(-clr, dfdx)
-      state.dfdx:add(-clr, dfdx)
-   end
-
-   -- (6) update evaluation counter
-   state.evalCounter = state.evalCounter + 1
-
-   -- return x, f(x) before optimization
-   return x,{fx}
-end
diff --git a/contrib/lua-torch/optim/polyinterp.lua b/contrib/lua-torch/optim/polyinterp.lua
deleted file mode 100644
index c5026bf043..0000000000
--- a/contrib/lua-torch/optim/polyinterp.lua
+++ /dev/null
@@ -1,212 +0,0 @@
-local function isreal(x)
-   -- a real (non-NaN) value compares equal to itself
-   return x == x
-end
-
-local function isnan(x)
-   -- NaN is the only value that compares unequal to itself
-   return x ~= x
-end
-
-local function roots(c)
-   local tol=1e-12
-   c[torch.lt(torch.abs(c),tol)]=0
-
-   local nonzero = torch.ne(c,0)
-   if nonzero:max() == 0 then
-      return 0
-   end
-
-   -- first non-zero
-   local _,pos = torch.max(nonzero,1)
-   pos = pos[1]
-   c=c[{ {pos,-1} }]
-
-   local nz = 0
-   for i=c:size(1),1,-1 do
-      if c[i] ~= 0 then
-         break
-      else
-         nz = nz + 1
-      end
-   end
-   c=c[{ {1,c:size(1)-nz} }]
-
-   local n = c:size(1)-1
-   if n == 1 then
-      local e = c.new({{-c[2]/c[1], 0}})
-      if nz > 0 then
-         return torch.cat(e, c.new(nz, 2):zero(), 1)
-      else
-         return e
-      end
-   elseif n > 1 then
-      local A = torch.diag(c.new(n-1):fill(1),-1)
-      A[1] = -c[{ {2,n+1} }]/c[1];
-      local e = torch.eig(A,'N')
-      if nz > 0 then
-         return torch.cat(e, c.new(nz,2):zero(), 1)
-      else
-         return e
-      end
-   else
-      return c.new(nz,2):zero()
-   end
-end
-
-local function real(x)
-   if type(x) == 'number' then return x end
-   return x[{ {} , 1}]
-end
-
-local function imag(x)
-   if type(x) == 'number' then return 0 end
-   if
x:nDimension() == 1 then - return x.new(x:size(1)):zero() - else - return x[{ {}, 2}] - end -end - -local function polyval(p,x) - local pwr = p:size(1) - if type(x) == 'number' then - local val = 0 - p:apply(function(pc) pwr = pwr-1; val = val + pc*x^pwr; return pc end) - return val - else - local val = x.new(x:size(1)) - p:apply(function(pc) pwr = pwr-1; val:add(pc,torch.pow(x,pwr)); return pc end) - return val - end -end - ----------------------------------------------------------------------- --- Minimum of interpolating polynomial based on function and --- derivative values --- --- ARGS: --- points : N triplets (x,f,g), must be a Tensor --- xmin : min value that brackets minimum (default: min of points) --- xmax : max value that brackets maximum (default: max of points) --- --- RETURN: --- minPos : position of minimum --- -function optim.polyinterp(points,xminBound,xmaxBound) - -- locals - local sqrt = torch.sqrt - local mean = torch.mean - local max = math.max - local min = math.min - - -- nb of points / order of polynomial - local nPoints = points:size(1) - local order = nPoints*2-1 - - -- returned values - local minPos - - -- Code for most common case: - -- + cubic interpolation of 2 points w/ function and derivative values for both - -- + no xminBound/xmaxBound - if nPoints == 2 and order == 3 and not xminBound and not xmaxBound then - -- Solution in this case (where x2 is the farthest point): - -- d1 = g1 + g2 - 3*(f1-f2)/(x1-x2); - -- d2 = sqrt(d1^2 - g1*g2); - -- minPos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2)); - -- t_new = min(max(minPos,x1),x2); - local minVal,minPos = points[{ {},1 }]:min(1) - minVal = minVal[1] minPos = minPos[1] - local notMinPos = -minPos+3; - - local d1 = points[{minPos,3}] + points[{notMinPos,3}] - - 3*(points[{minPos,2}]-points[{notMinPos,2}]) - / (points[{minPos,1}]-points[{notMinPos,1}]); - local d2 = sqrt(d1^2 - points[{minPos,3}]*points[{notMinPos,3}]); - - if isreal(d2) then -- isreal() - local t = points[{notMinPos,1}] - (points[{notMinPos,1}] - - points[{minPos,1}]) * ((points[{notMinPos,3}] + d2 - d1) - / (points[{notMinPos,3}] - points[{minPos,3}] + 2*d2)) - - minPos = min(max(t,points[{minPos,1}]),points[{notMinPos,1}]) - else - minPos = mean(points[{{},1}]) - end - return minPos - end - - -- TODO: get the code below to work! - --error(' extrapolation not implemented yet...') - - -- Compute Bounds of Interpolation Area - local xmin = points[{{},1}]:min() - local xmax = points[{{},1}]:max() - xminBound = xminBound or xmin - xmaxBound = xmaxBound or xmax - - -- Add constraints on function values - local A = points.new(nPoints*2,order+1):zero() - local b = points.new(nPoints*2,1):zero() - for i = 1,nPoints do - local constraint = points.new(order+1):zero() - for j = order,0,-1 do - constraint[order-j+1] = points[{i,1}]^j - end - A[i] = constraint - b[i] = points[{i,2}] - end - - -- Add constraints based on derivatives - for i = 1,nPoints do - local constraint = points.new(order+1):zero() - for j = 1,order do - constraint[j] = (order-j+1)*points[{i,1}]^(order-j) - end - A[nPoints+i] = constraint - b[nPoints+i] = points[{i,3}] - end - - -- Find interpolating polynomial - local res = torch.gels(b,A) - local params = res[{ {1,nPoints*2} }]:squeeze() - - params[torch.le(torch.abs(params),1e-12)]=0 - - -- Compute Critical Points - local dParams = points.new(order):zero(); - for i = 1,params:size(1)-1 do - dParams[i] = params[i]*(order-i+1) - end - - -- nan/inf? 
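-   -- (torch.ne(v,v) flags NaN entries, since NaN ~= NaN; torch.eq(v,math.huge) flags +inf)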
- local nans = false - if torch.ne(dParams,dParams):max() > 0 or torch.eq(dParams,math.huge):max() > 0 then - nans = true - end - - local cp = torch.cat(points.new{xminBound,xmaxBound},points[{{},1}]) - if not nans then - local cproots = roots(dParams) - local cpi = points.new(cp:size(1),2):zero() - cpi[{ {1,cp:size(1)} , 1 }] = cp - cp = torch.cat(cpi,cproots,1) - end - - -- Test Critical Points - local fmin = math.huge - -- Default to Bisection if no critical points valid: - minPos = (xminBound+xmaxBound)/2 - for i = 1,cp:size(1) do - local xCP = cp[{ {i,i} , {} }] - local ixCP = imag(xCP)[1] - local rxCP = real(xCP)[1] - if ixCP == 0 and rxCP >= xminBound and rxCP <= xmaxBound then - local fCP = polyval(params,rxCP) - if fCP < fmin then - minPos = rxCP - fmin = fCP - end - end - end - return minPos,fmin -end diff --git a/contrib/lua-torch/optim/rmsprop.lua b/contrib/lua-torch/optim/rmsprop.lua deleted file mode 100644 index aa562006ab..0000000000 --- a/contrib/lua-torch/optim/rmsprop.lua +++ /dev/null @@ -1,58 +0,0 @@ ---[[ An implementation of RMSprop - -ARGS: - -- 'opfunc' : a function that takes a single input (X), the point - of a evaluation, and returns f(X) and df/dX -- 'x' : the initial point -- 'config` : a table with configuration parameters for the optimizer -- 'config.learningRate' : learning rate -- 'config.alpha' : smoothing constant -- 'config.epsilon' : value with which to initialise m -- 'config.weightDecay' : weight decay -- 'state' : a table describing the state of the optimizer; - after each call the state is modified -- 'state.m' : leaky sum of squares of parameter gradients, -- 'state.tmp' : and the square root (with epsilon smoothing) - -RETURN: -- `x` : the new x vector -- `f(x)` : the function, evaluated before the update - -]] - -function optim.rmsprop(opfunc, x, config, state) - -- (0) get/update state - local config = config or {} - local state = state or config - local lr = config.learningRate or 1e-2 - local alpha = config.alpha or 0.99 - local epsilon = config.epsilon or 1e-8 - local wd = config.weightDecay or 0 - local mfill = config.initialMean or 0 - - -- (1) evaluate f(x) and df/dx - local fx, dfdx = opfunc(x) - - -- (2) weight decay - if wd ~= 0 then - dfdx:add(wd, x) - end - - -- (3) initialize mean square values and square gradient storage - if not state.m then - state.m = torch.Tensor():typeAs(x):resizeAs(dfdx):fill(mfill) - state.tmp = torch.Tensor():typeAs(x):resizeAs(dfdx) - end - - -- (4) calculate new (leaky) mean squared values - state.m:mul(alpha) - state.m:addcmul(1.0-alpha, dfdx, dfdx) - - -- (5) perform update - state.tmp:sqrt(state.m):add(epsilon) - x:addcdiv(-lr, dfdx, state.tmp) - - -- return x*, f(x) before optimization - return x, {fx} -end diff --git a/contrib/lua-torch/optim/rprop.lua b/contrib/lua-torch/optim/rprop.lua deleted file mode 100644 index d7af164295..0000000000 --- a/contrib/lua-torch/optim/rprop.lua +++ /dev/null @@ -1,103 +0,0 @@ ---[[ A plain implementation of RPROP - -ARGS: -- `opfunc` : a function that takes a single input (X), the point of - evaluation, and returns f(X) and df/dX -- `x` : the initial point -- `state` : a table describing the state of the optimizer; after each - call the state is modified -- `state.stepsize` : initial step size, common to all components -- `state.etaplus` : multiplicative increase factor, > 1 (default 1.2) -- `state.etaminus` : multiplicative decrease factor, < 1 (default 0.5) -- `state.stepsizemax` : maximum stepsize allowed (default 50) -- `state.stepsizemin` : minimum stepsize 
allowed (default 1e-6) -- `state.niter` : number of iterations (default 1) - -RETURN: -- `x` : the new x vector -- `f(x)` : the function, evaluated before the update - -(Martin Riedmiller, Koray Kavukcuoglu 2013) ---]] -function optim.rprop(opfunc, x, config, state) - if config == nil and state == nil then - print('no state table RPROP initializing') - end - -- (0) get/update state - local config = config or {} - local state = state or config - local stepsize = config.stepsize or 0.1 - local etaplus = config.etaplus or 1.2 - local etaminus = config.etaminus or 0.5 - local stepsizemax = config.stepsizemax or 50.0 - local stepsizemin = config.stepsizemin or 1E-06 - local niter = config.niter or 1 - - local hfx = {} - - for i=1,niter do - - -- (1) evaluate f(x) and df/dx - local fx,dfdx = opfunc(x) - - -- init temp storage - if not state.delta then - state.delta = dfdx.new(dfdx:size()):zero() - state.stepsize = dfdx.new(dfdx:size()):fill(stepsize) - state.sign = dfdx.new(dfdx:size()) - state.psign = torch.ByteTensor(dfdx:size()) - state.nsign = torch.ByteTensor(dfdx:size()) - state.zsign = torch.ByteTensor(dfdx:size()) - state.dminmax = torch.ByteTensor(dfdx:size()) - if torch.type(x)=='torch.CudaTensor' then - -- Push to GPU - state.psign = state.psign:cuda() - state.nsign = state.nsign:cuda() - state.zsign = state.zsign:cuda() - state.dminmax = state.dminmax:cuda() - end - end - - -- sign of derivative from last step to this one - torch.cmul(state.sign, dfdx, state.delta) - torch.sign(state.sign, state.sign) - - -- get indices of >0, <0 and ==0 entries - state.sign.gt(state.psign, state.sign, 0) - state.sign.lt(state.nsign, state.sign, 0) - state.sign.eq(state.zsign, state.sign, 0) - - -- get step size updates - state.sign[state.psign] = etaplus - state.sign[state.nsign] = etaminus - state.sign[state.zsign] = 1 - - -- update stepsizes with step size updates - state.stepsize:cmul(state.sign) - - -- threshold step sizes - -- >50 => 50 - state.stepsize.gt(state.dminmax, state.stepsize, stepsizemax) - state.stepsize[state.dminmax] = stepsizemax - -- <1e-6 ==> 1e-6 - state.stepsize.lt(state.dminmax, state.stepsize, stepsizemin) - state.stepsize[state.dminmax] = stepsizemin - - -- for dir<0, dfdx=0 - -- for dir>=0 dfdx=dfdx - dfdx[state.nsign] = 0 - -- state.sign = sign(dfdx) - torch.sign(state.sign,dfdx) - - -- update weights - x:addcmul(-1,state.sign,state.stepsize) - - -- update state.dfdx with current dfdx - state.delta:copy(dfdx) - - table.insert(hfx,fx) - end - - -- return x*, f(x) before optimization - return x,hfx -end diff --git a/contrib/lua-torch/optim/sgd.lua b/contrib/lua-torch/optim/sgd.lua deleted file mode 100644 index e21c696a6e..0000000000 --- a/contrib/lua-torch/optim/sgd.lua +++ /dev/null @@ -1,90 +0,0 @@ ---[[ A plain implementation of SGD - -ARGS: - -- `opfunc` : a function that takes a single input (X), the point - of a evaluation, and returns f(X) and df/dX -- `x` : the initial point -- `config` : a table with configuration parameters for the optimizer -- `config.learningRate` : learning rate -- `config.learningRateDecay` : learning rate decay -- `config.weightDecay` : weight decay -- `config.weightDecays` : vector of individual weight decays -- `config.momentum` : momentum -- `config.dampening` : dampening for momentum -- `config.nesterov` : enables Nesterov momentum -- `config.learningRates` : vector of individual learning rates -- `state` : a table describing the state of the optimizer; after each - call the state is modified -- `state.evalCounter` : evaluation counter 
(optional: 0, by default) - -RETURN: -- `x` : the new x vector -- `f(x)` : the function, evaluated before the update - -(Clement Farabet, 2012) -]] -function optim.sgd(opfunc, x, config, state) - -- (0) get/update state - local config = config or {} - local state = state or config - local lr = config.learningRate or 1e-3 - local lrd = config.learningRateDecay or 0 - local wd = config.weightDecay or 0 - local mom = config.momentum or 0 - local damp = config.dampening or mom - local nesterov = config.nesterov or false - local lrs = config.learningRates - local wds = config.weightDecays - state.evalCounter = state.evalCounter or 0 - local nevals = state.evalCounter - assert(not nesterov or (mom > 0 and damp == 0), "Nesterov momentum requires a momentum and zero dampening") - - -- (1) evaluate f(x) and df/dx - local fx,dfdx = opfunc(x) - - -- (2) weight decay with single or individual parameters - if wd ~= 0 then - dfdx:add(wd, x) - elseif wds then - if not state.decayParameters then - state.decayParameters = torch.Tensor():typeAs(x):resizeAs(dfdx) - end - state.decayParameters:copy(wds):cmul(x) - dfdx:add(state.decayParameters) - end - - -- (3) apply momentum - if mom ~= 0 then - if not state.dfdx then - state.dfdx = torch.Tensor():typeAs(dfdx):resizeAs(dfdx):copy(dfdx) - else - state.dfdx:mul(mom):add(1-damp, dfdx) - end - if nesterov then - dfdx:add(mom, state.dfdx) - else - dfdx = state.dfdx - end - end - - -- (4) learning rate decay (annealing) - local clr = lr / (1 + nevals*lrd) - - -- (5) parameter update with single or individual learning rates - if lrs then - if not state.deltaParameters then - state.deltaParameters = torch.Tensor():typeAs(x):resizeAs(dfdx) - end - state.deltaParameters:copy(lrs):cmul(dfdx) - x:add(-clr, state.deltaParameters) - else - x:add(-clr, dfdx) - end - - -- (6) update evaluation counter - state.evalCounter = state.evalCounter + 1 - - -- return x*, f(x) before optimization - return x,{fx} -end diff --git a/contrib/lua-torch/paths/CMakeLists.txt b/contrib/lua-torch/paths/CMakeLists.txt deleted file mode 100644 index c578b66e99..0000000000 --- a/contrib/lua-torch/paths/CMakeLists.txt +++ /dev/null @@ -1,55 +0,0 @@ -cmake_minimum_required(VERSION 2.6) - -INCLUDE_DIRECTORIES("${CMAKE_CURRENT_BINARY_DIR}") - -INCLUDE(CheckIncludeFiles) -INCLUDE(CheckFunctionExists) -INCLUDE(CheckLibraryExists) - -IF (UNIX OR NOT WIN32) - CHECK_INCLUDE_FILES(fcntl.h HAVE_FCNTL_H) - CHECK_INCLUDE_FILES(unistd.h HAVE_UNISTD_H) - CHECK_INCLUDE_FILES(dirent.h HAVE_DIRENT_H) - CHECK_INCLUDE_FILES(time.h HAVE_TIME_H) - CHECK_INCLUDE_FILES(sys/time.h HAVE_SYS_TIME_H) - CHECK_INCLUDE_FILES(sys/ndir.h HAVE_SYS_NDIR_H) - CHECK_INCLUDE_FILES(sys/utsname.h HAVE_SYS_UTSNAME_H) - CHECK_INCLUDE_FILES(sys/dir.h HAVE_SYS_DIR_H) - CHECK_INCLUDE_FILES(ndir.h HAVE_NDIR_H) - CHECK_FUNCTION_EXISTS(getcwd HAVE_GETCWD) - CHECK_LIBRARY_EXISTS(dl dlopen "" HAVE_DLOPEN) -ENDIF (UNIX OR NOT WIN32) - -CONFIGURE_FILE("paths.h.in" "${CMAKE_CURRENT_BINARY_DIR}/paths.h") - -SET(src - "${CMAKE_CURRENT_SOURCE_DIR}/paths.c" - "${CMAKE_CURRENT_BINARY_DIR}/paths.h" ) - -SET(luasrc - "${CMAKE_CURRENT_SOURCE_DIR}/init.lua") - -# When using MSVC -IF(MSVC) - # we want to respect the standard, and we are bored of those **** . 
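-  # (_CRT_SECURE_NO_DEPRECATE silences MSVC's "deprecated" warnings for the
-  # standard C runtime functions used by paths.c)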
- ADD_DEFINITIONS(-D_CRT_SECURE_NO_DEPRECATE=1) -ENDIF(MSVC) - -ADD_LIBRARY("paths" SHARED ${src}) -SET_TARGET_PROPERTIES("paths" PROPERTIES - PREFIX "lib" - IMPORT_PREFIX "lib") - -IF(APPLE) - SET_TARGET_PROPERTIES("paths" PROPERTIES - LINK_FLAGS "-undefined dynamic_lookup") -ENDIF() - -INSTALL(FILES ${luasrc} DESTINATION ${LUALIBDIR}/paths) -INSTALL(TARGETS paths - LIBRARY DESTINATION ${RSPAMD_LIBDIR} - RUNTIME DESTINATION ${RSPAMD_LIBDIR}) - -IF(LUALIB) - TARGET_LINK_LIBRARIES(paths ${LUALIB}) -ENDIF() diff --git a/contrib/lua-torch/paths/COPYRIGHT.txt b/contrib/lua-torch/paths/COPYRIGHT.txt deleted file mode 100644 index bc002b78ab..0000000000 --- a/contrib/lua-torch/paths/COPYRIGHT.txt +++ /dev/null @@ -1,36 +0,0 @@ -Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) -Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) -Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) -Copyright (c) 2011-2013 NYU (Clement Farabet) -Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) -Copyright (c) 2006 Idiap Research Institute (Samy Bengio) -Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the names of Deepmind Technologies, NYU, NEC Laboratories America - and IDIAP Research Institute nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. 
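All of the optimisers removed above share one calling convention,
`optim.<name>(opfunc, x, config, state)`, where `opfunc` returns f(X) and
df/dX. For reference, the canonical minibatch loop over `optim.sgd` looks
roughly like this (a sketch only; `model`, `criterion`, `inputs` and
`targets` are assumed to come from the surrounding training code):

   local params, gradParams = model:getParameters()
   local config = {learningRate = 0.01, momentum = 0.9}
   local function feval(x)
      if x ~= params then params:copy(x) end
      gradParams:zero()
      local outputs = model:forward(inputs)
      local loss = criterion:forward(outputs, targets)
      model:backward(inputs, criterion:backward(outputs, targets))
      return loss, gradParams
   end
   optim.sgd(feval, params, config)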
diff --git a/contrib/lua-torch/paths/README.md b/contrib/lua-torch/paths/README.md deleted file mode 100644 index 2464c090ef..0000000000 --- a/contrib/lua-torch/paths/README.md +++ /dev/null @@ -1,13 +0,0 @@ - -# Filename Manipulation Package # - -This package provides portable functions and variables to manipulate the file system : - - * [Manipulating filenames](doc/filenames.md) : functions for manipulating filenames ; - * [Directory functions](doc/dirfunctions.md) : functions for listing and manipulating directories ; - * [Directory paths](doc/dirpaths.md) : paths to well known directories ; - * [Miscellaneous](doc/misc.md) : uncategorized functions ; - -When this package is loaded, it also computes a number of useful -variables indicating where the various Torch components are installed. -Do not change their values. diff --git a/contrib/lua-torch/paths/init.lua b/contrib/lua-torch/paths/init.lua deleted file mode 100644 index 9078c35236..0000000000 --- a/contrib/lua-torch/paths/init.lua +++ /dev/null @@ -1,141 +0,0 @@ -require 'libpaths' - -local assert = assert -local debug = debug -local pcall = pcall -local type = type -local ipairs = ipairs -local os = os - -function paths.is_win() - return paths.uname():match('Windows') -end - -function paths.is_mac() - return paths.uname():match('Darwin') -end - -if paths.is_win() then - paths.home = os.getenv('HOMEDRIVE') or 'C:' - paths.home = paths.home .. ( os.getenv('HOMEPATH') or '\\' ) -else - paths.home = os.getenv('HOME') or '.' -end - -function paths.files(s, f) - local d = paths.dir(s) - local n = 0 - if type(f) == 'string' then - local pattern = f - f = function(file) return file:find(pattern) end - elseif f and type(f) ~= 'function' then - error("Expecting optional arg 2 to be function or string. Got : "..torch.type(f)) - end - f = f or function(file) return true end - local n = 0 - return function() - while true do - n = n + 1 - if d == nil or n > #d then - return nil - elseif f(d[n]) then - return d[n] - end - end - end -end - -function paths.iterdirs(s) - return paths.files(s, - function(dir) - return paths.dirp(paths.concat(s, dir)) and dir ~= '.' and dir ~= '..' - end) -end - -function paths.iterfiles(s) - return paths.files(s, - function(file) - return paths.filep(paths.concat(s, file)) and file ~= '.' and file ~= '..' - end) -end - -function paths.thisfile(arg, depth) - local s = debug.getinfo(depth or 2).source - if type(s) ~= "string" then - s = nil - elseif s:match("^@") then -- when called from a file - s = paths.concat(s:sub(2)) - elseif s:match("^qt[.]") then -- when called from a qtide editor - local function z(s) return qt[s].fileName:tostring() end - local b, f = pcall(z, s:sub(4)); - if b and f and f ~= "" then s = f else s = nil end - end - if type(arg) == "string" then - if s then s = paths.concat(paths.dirname(s), arg) else s = arg end - end - return s -end - -function paths.dofile(f, depth) - local s = paths.thisfile(nil, 1 + (depth or 2)) - if s and s ~= "" then - f = paths.concat(paths.dirname(s),f) - end - return dofile(f) -end - -function paths.rmall(d, more) - if more ~= 'yes' then - return nil, "missing second argument ('yes')" - elseif paths.filep(d) then - return os.remove(d) - elseif paths.dirp(d) then - for f in paths.files(d) do - if f ~= '.' and f ~= '..' then - local ff = paths.concat(d, f) - local r0,r1,r2 = paths.rmall(ff, more) - if not r0 then - return r0,r1,ff - end - end - end - return paths.rmdir(d) - else - return nil, "not a file or directory", d - end -end - -function paths.findprogram(...) 
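-   -- scan PATH for the first existing executable; on Windows, also consult
-   -- the "App Paths" and shell\open\command registry entries before giving up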
- for _,exe in ipairs{...} do - if paths.is_win() then - if not exe:match('[.]exe$') then - exe = exe .. '.exe' - end - local path, k, x = os.getenv("PATH") or "." - for dir in path:gmatch('[^;]+') do - x = paths.concat(dir, exe) - if paths.filep(x) then return x end - end - local function clean(s) - if s:match('^"') then return s:match('[^"]+') else return s end - end - k = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\' .. exe - x = paths.getregistryvalue('HKEY_CURRENT_USER', k, '') - if type(x) == 'string' then return clean(x) end - x = paths.getregistryvalue('HKEY_LOCAL_MACHINE', k, '') - if type(x) == 'string' then return clean(x) end - k = 'Applications\\' .. exe .. '\\shell\\open\\command' - x = paths.getregistryvalue('HKEY_CLASSES_ROOT', k, '') - if type(x) == 'string' then return clean(x) end - else - local path = os.getenv("PATH") or "." - for dir in path:gmatch('[^:]+') do - local x = paths.concat(dir, exe) - if paths.filep(x) then return x end - end - end - end - return nil -end - -return paths diff --git a/contrib/lua-torch/paths/mkdocs.yml b/contrib/lua-torch/paths/mkdocs.yml deleted file mode 100644 index 1fe86d1b0d..0000000000 --- a/contrib/lua-torch/paths/mkdocs.yml +++ /dev/null @@ -1,12 +0,0 @@ -site_name: paths -theme : simplex -repo_url : https://github.com/torch/paths -use_directory_urls : false -markdown_extensions: [extra] -docs_dir : doc -pages: -- [index.md, Paths] -- [filenames.md, Manipulating Filenames] -- [dirfunctions.md, Directory Functions] -- [dirpaths.md, Directory Paths] -- [misc.md, Miscellaneous] diff --git a/contrib/lua-torch/paths/paths.c b/contrib/lua-torch/paths/paths.c deleted file mode 100644 index 7c3a72e545..0000000000 --- a/contrib/lua-torch/paths/paths.c +++ /dev/null @@ -1,1161 +0,0 @@ -/* -*- C -*- */ - - -#include "paths.h" - - -/* ------------------------------------------------------ */ -/* Utils to manipulate strings */ - - -#define SBINCREMENT 256 - -typedef struct { - char *buffer; - int maxlen; - int len; -} SB; - -static void -sbinit(SB *sb) -{ - sb->buffer = (char*)malloc(SBINCREMENT); - sb->maxlen = SBINCREMENT; - sb->len = 0; -} - -static char * -sbfree(SB *sb) -{ - if (sb->buffer) - free(sb->buffer); - sb->buffer = 0; - return 0; -} - -static void -sbgrow(SB *sb, int n) -{ - if (sb->buffer && sb->len + n > sb->maxlen) - { - int nlen = sb->maxlen; - while (sb->len + n > nlen) - nlen += SBINCREMENT; - sb->buffer = (char*)realloc(sb->buffer, nlen); - sb->maxlen = nlen; - } -} - -static void -sbadd1(SB *sb, char c) -{ - sbgrow(sb, 1); - if (sb->buffer) - sb->buffer[sb->len++] = c; -} - -static void -sbaddn(SB *sb, const char *s, int n) -{ - sbgrow(sb, n); - if (sb->buffer && s && n) - memcpy(sb->buffer + sb->len, s, n); - else if (sb->buffer && n) - sbfree(sb); - sb->len += n; -} - -static void -sbaddsf(SB *sb, char *s) -{ - if (s) - sbaddn(sb, s, strlen(s)); - else - sbfree(sb); - if (s) - free((void*)s); -} - -static void -sbslash(SB *sb) -{ - int i; - if (sb->buffer && sb->len) - for(i=0; i<sb->len; i++) - if (sb->buffer[i]=='\\') - sb->buffer[i]='/'; -} - -static int -sbpush(lua_State *L, SB *sb) -{ - sbslash(sb); - lua_pushlstring(L, sb->buffer, sb->len); - sbfree(sb); - return 1; -} - -static int -sbsetpush(lua_State *L, SB *sb, const char *s) -{ - sbfree(sb); - lua_pushstring(L, s); - return 1; -} - - -/* ------------------------------------------------------ */ -/* filep, dirp, basename, dirname */ - - -static int -filep(lua_State *L, int i) -{ - const char *s = luaL_checkstring(L, i); -#ifdef _WIN32 - struct _stat
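/* From Lua these bindings surface as simple predicates; a short sketch
   (the names '/etc' and '/etc/passwd' are only illustrative values):

     print(paths.filep('/etc/passwd'))  -- true only for a regular file
     print(paths.dirp('/etc'))          -- true only for a directory
     print(paths.cwd())                 -- current directory, slashes normalized

   Both predicates stat() the argument and reject the opposite kind, so a
   nonexistent path is false for both. */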
buf; - if (_stat(s,&buf) < 0) - return 0; - if (buf.st_mode & S_IFDIR) - return 0; -#else - struct stat buf; - if (stat(s,&buf) < 0) - return 0; - if (buf.st_mode & S_IFDIR) - return 0; -#endif - return 1; -} - - -static int -dirp(lua_State *L, int i) -{ - const char *s = luaL_checkstring(L, i); -#ifdef _WIN32 - char buffer[8]; - struct _stat buf; - const char *last; - if ((s[0]=='/' || s[0]=='\\') && - (s[1]=='/' || s[1]=='\\') && !s[2]) - return 1; - if (s[0] && isalpha((unsigned char)(s[0])) && s[1] == ':' && s[2] == 0) - { buffer[0]=s[0]; buffer[1]=':'; buffer[2]='.'; buffer[3]=0; s = buffer; } - if (_stat(s, &buf) >= 0) - if (buf.st_mode & S_IFDIR) - return 1; -#else - struct stat buf; - if (stat(s,&buf)==0) - if (buf.st_mode & S_IFDIR) - return 1; -#endif - return 0; -} - - -static int -lua_filep(lua_State *L) -{ - lua_pushboolean(L, filep(L, 1)); - return 1; -} - - -static int -lua_dirp(lua_State *L) -{ - lua_pushboolean(L, dirp(L, 1)); - return 1; -} - - -static int -lua_basename(lua_State *L) -{ - const char *fname = luaL_checkstring(L, 1); - const char *suffix = luaL_optstring(L, 2, 0); - -#ifdef _WIN32 - - int sl; - const char *p, *s; - SB sb; - sbinit(&sb); - /* Special cases */ - if (fname[0] && fname[1]==':') { - sbaddn(&sb, fname, 2); - fname += 2; - if (fname[0]=='/' || fname[0]=='\\') - sbadd1(&sb, '/'); - while (fname[0]=='/' || fname[0]=='\\') - fname += 1; - if (fname[0]==0) - return sbpush(L, &sb); - sb.len = 0; - } - /* Position p after last nontrivial slash */ - s = p = fname; - while (*s) { - if ((s[0]=='\\' || s[0]=='/') && - (s[1] && s[1]!='/' && s[1]!='\\' ) ) - p = s + 1; - s++; - } - /* Copy into buffer */ - while (*p && *p!='/' && *p!='\\') - sbadd1(&sb, *p++); - /* Process suffix */ - if (suffix==0 || suffix[0]==0) - return sbpush(L, &sb); - if (suffix[0]=='.') - suffix += 1; - if (suffix[0]==0) - return sbpush(L, &sb); - sl = strlen(suffix); - if (sb.len > sl) { - s = sb.buffer + sb.len - (sl + 1); - if (s[0]=='.' && _strnicmp(s+1,suffix, sl)==0) - sb.len = s - sb.buffer; - } - return sbpush(L, &sb); - -#else - - int sl; - const char *s, *p; - SB sb; - sbinit(&sb); - /* Position p after last nontrivial slash */ - s = p = fname; - while (*s) { - if (s[0]=='/' && s[1] && s[1]!='/') - p = s + 1; - s++; - } - /* Copy into buffer */ - while (*p && *p!='/') - sbadd1(&sb, *p++); - /* Process suffix */ - if (suffix==0 || suffix[0]==0) - return sbpush(L, &sb); - if (suffix[0]=='.') - suffix += 1; - if (suffix[0]==0) - return sbpush(L, &sb); - sl = strlen(suffix); - if (sb.len > sl) { - s = sb.buffer + sb.len - (sl + 1); - if (s[0]=='.' 
&& strncmp(s+1,suffix, sl)==0) - sb.len = s - sb.buffer; - } - return sbpush(L, &sb); - -#endif -} - - -static int -lua_dirname(lua_State *L) -{ - const char *fname = luaL_checkstring(L, 1); - -#ifdef _WIN32 - - const char *s; - const char *p; - SB sb; - sbinit(&sb); - /* Handle leading drive specifier */ - if (isalpha((unsigned char)fname[0]) && fname[1]==':') { - sbadd1(&sb, *fname++); - sbadd1(&sb, *fname++); - } - /* Search last non terminal / or \ */ - p = 0; - s = fname; - while (*s) { - if ((s[0]=='\\' || s[0]=='/') && - (s[1] && s[1]!='/' && s[1]!='\\') ) - p = s; - s++; - } - /* Cannot find non terminal / or \ */ - if (p == 0) { - if (sb.len > 0) { - if (fname[0]==0 || fname[0]=='/' || fname[0]=='\\') - sbadd1(&sb, '/'); - return sbpush(L, &sb); - } else { - if (fname[0]=='/' || fname[0]=='\\') - return sbsetpush(L, &sb, "//"); - else - return sbsetpush(L, &sb, "."); - } - } - /* Single leading slash */ - if (p == fname) { - sbadd1(&sb, '/'); - return sbpush(L, &sb); - } - /* Backtrack all slashes */ - while (p>fname && (p[-1]=='/' || p[-1]=='\\')) - p--; - /* Multiple leading slashes */ - if (p == fname) - return sbsetpush(L, &sb, "//"); - /* Regular case */ - s = fname; - do { - sbadd1(&sb, *s++); - } while (s<p); - return sbpush(L, &sb); - -#else - - const char *s; - const char *p; - SB sb; - sbinit(&sb); - /* Search last non terminal / */ - p = 0; - s = fname; - while (*s) { - if (s[0]=='/' && s[1] && s[1]!='/') - p = s; - s++; - } - /* Cannot find non terminal / */ - if (p == 0) { - if (fname[0]=='/') - return sbsetpush(L, &sb, "/"); - else - return sbsetpush(L, &sb, "."); - } - /* Single leading slash */ - if (p == fname) - return sbsetpush(L, &sb, "/"); - /* Backtrack all slashes */ - while (p>fname && p[-1]=='/') - p--; - /* Multiple leading slashes */ - if (p == fname) - return sbsetpush(L, &sb, "/"); - /* Regular case */ - s = fname; - do { - sbadd1(&sb, *s++); - } while (s<p); - return sbpush(L, &sb); - -#endif -} - - -static int -lua_extname(lua_State *L) -{ - const char *fname = luaL_checkstring(L, 1); - const char *p = fname + strlen(fname); - while (p >= fname) { - if (*p == '.') { - lua_pushstring(L, p + 1); - return 1; - } - p--; - } - return 0; -} - - -/* ------------------------------------------------------ */ -/* cwd and concat */ - - -static int -lua_cwd(lua_State *L) -{ -#ifdef _WIN32 - - char drv[2]; - int l; - SB sb; - sbinit(&sb); - drv[0] = '.'; drv[1] = 0; - l = GetFullPathNameA(drv, sb.maxlen, sb.buffer, 0); - if (l > sb.maxlen) { - sbgrow(&sb, l+1); - l = GetFullPathNameA(drv, sb.maxlen, sb.buffer, 0); - } - if (l <= 0) - return sbsetpush(L, &sb, "."); - sb.len += l; - return sbpush(L, &sb); - -#elif HAVE_GETCWD - - const char *s; - SB sb; - sbinit(&sb); - s = getcwd(sb.buffer, sb.maxlen); - while (!s && errno==ERANGE) - { - sbgrow(&sb, sb.maxlen + SBINCREMENT); - s = getcwd(sb.buffer, sb.maxlen); - } - if (! s) - return sbsetpush(L, &sb, "."); - sb.len += strlen(s); - return sbpush(L, &sb); - -#else - - const char *s; - SB sb; - sbinit(&sb); - sbgrow(&sb, PATH_MAX); - s = getwd(sb.buffer); - if (!
s) - return sbsetpush(L, &sb, "."); - sb.len += strlen(s); - return sbpush(L, &sb); - -#endif -} - - - -static int -concat_fname(lua_State *L, const char *fname) -{ - const char *from = lua_tostring(L, -1); - -#ifdef _WIN32 - - const char *s; - SB sb; - sbinit(&sb); - sbaddn(&sb, from, strlen(from)); - if (fname==0) - return sbpush(L, &sb); - /* Handle absolute part of fname */ - if (fname[0]=='/' || fname[0]=='\\') { - if (fname[1]=='/' || fname[1]=='\\') { - sb.len = 0; /* Case //abcd */ - sbaddn(&sb, "//", 2); - } else { - char drive; - if (sb.len >= 2 && sb.buffer[1]==':' /* Case "/abcd" */ - && isalpha((unsigned char)(sb.buffer[0])) ) - drive = sb.buffer[0]; - else - drive = _getdrive() + 'A' - 1; - sb.len = 0; - sbadd1(&sb, drive); - sbaddn(&sb, ":/", 2); - } - } else if (fname[0] && /* Case "x:abcd" */ - isalpha((unsigned char)(fname[0])) && fname[1]==':') { - if (fname[2]!='/' && fname[2]!='\\') { - if (sb.len < 2 || sb.buffer[1]!=':' - || !isalpha((unsigned char)(sb.buffer[0])) - || (toupper((unsigned char)sb.buffer[0]) != - toupper((unsigned char)fname[0]) ) ) - { - int l; - char drv[4]; - sb.len = 0; - drv[0]=fname[0]; drv[1]=':'; drv[2]='.'; drv[3]=0; - l = GetFullPathNameA(drv, sb.maxlen, sb.buffer, 0); - if (l > sb.maxlen) { - sbgrow(&sb, l+1); - l = GetFullPathNameA(drv, sb.maxlen, sb.buffer, 0); - } - if (l <= 0) - sbaddn(&sb, drv, 3); - else - sb.len += l; - } - fname += 2; - } else { - sb.len = 0; /* Case "x:/abcd" */ - sbadd1(&sb, toupper((unsigned char)fname[0])); - sbaddn(&sb, ":/", 2); - fname += 2; - while (*fname == '/' || *fname == '\\') - fname += 1; - } - } - /* Process path components */ - for (;;) - { - while (*fname=='/' || *fname=='\\') - fname ++; - if (*fname == 0) - return sbpush(L, &sb); - if (fname[0]=='.') { - if (fname[1]=='/' || fname[1]=='\\' || fname[1]==0) { - fname += 1; - continue; - } - if (fname[1]=='.') - if (fname[2]=='/' || fname[2]=='\\' || fname[2]==0) { - size_t l; - fname += 2; - lua_pushcfunction(L, lua_dirname); - sbpush(L, &sb); - lua_call(L, 1, 1); - s = lua_tolstring(L, -1, &l); - sbinit(&sb); - sbaddn(&sb, s, l); - lua_pop(L, 1); - continue; - } - } - if (sb.len==0 || - (sb.buffer[sb.len-1]!='/' && sb.buffer[sb.len-1]!='\\') ) - sbadd1(&sb, '/'); - while (*fname && *fname!='/' && *fname!='\\') - sbadd1(&sb, *fname++); - } - -#else - SB sb; - sbinit(&sb); - - if (fname && fname[0]=='/') - sbadd1(&sb, '/'); - else - sbaddn(&sb, from, strlen(from)); - for (;;) { - while (fname && fname[0]=='/') - fname++; - if (!fname || !fname[0]) { - sbadd1(&sb, '/'); - while (sb.len > 1 && sb.buffer[sb.len-1]=='/') - sb.len --; - return sbpush(L, &sb); - } - if (fname[0]=='.') { - if (fname[1]=='/' || fname[1]==0) { - fname +=1; - continue; - } - if (fname[1]=='.') - if (fname[2]=='/' || fname[2]==0) { - fname +=2; - while (sb.len > 0 && sb.buffer[sb.len-1]=='/') - sb.len --; - while (sb.len > 0 && sb.buffer[sb.len-1]!='/') - sb.len --; - continue; - } - } - if (sb.len == 0 || sb.buffer[sb.len-1] != '/') - sbadd1(&sb, '/'); - while (*fname!=0 && *fname!='/') - sbadd1(&sb, *fname++); - } - - -#endif - -} - - -static int -lua_concatfname(lua_State *L) -{ - int i; - int narg = lua_gettop(L); - lua_cwd(L); - for (i=1; i<=narg; i++) - { - concat_fname(L, luaL_checkstring(L, i)); - lua_remove(L, -2); - } - return 1; -} - - - -/* ------------------------------------------------------ */ -/* execdir */ - - -static int -lua_execdir(lua_State *L) -{ - const char *s = 0; -#if HAVE_LUA_EXECUTABLE_DIR - s = lua_executable_dir(0); -#endif - if (s && s[0]) - 
lua_pushstring(L, s); - else - lua_pushnil(L); - return 1; -} - - - -/* ------------------------------------------------------ */ -/* file lists */ - - -static int -lua_dir(lua_State *L) -{ - int k = 0; - const char *s = luaL_checkstring(L, 1); - -#ifdef _WIN32 - - SB sb; - struct _finddata_t info; - intptr_t hfind; - /* special cases */ - lua_createtable(L, 0, 0); - if ((s[0]=='/' || s[0]=='\\') && - (s[1]=='/' || s[1]=='\\') && !s[2]) - { - int drive; - hfind = GetLogicalDrives(); - for (drive='A'; drive<='Z'; drive++) - if (hfind & ((intptr_t)1<<(drive-'A'))) { - lua_pushfstring(L, "%c:/", drive); - lua_rawseti(L, -2, ++k); - } - } - else if (dirp(L, 1)) { - lua_pushliteral(L, ".."); - lua_rawseti(L, -2, ++k); - } else { - lua_pushnil(L); - return 1; - } - /* files */ - sbinit(&sb); - sbaddn(&sb, s, strlen(s)); - if (sb.len>0 && sb.buffer[sb.len-1]!='/' && sb.buffer[sb.len-1]!='\\') - sbadd1(&sb, '/'); - sbaddn(&sb, "*.*", 3); - sbadd1(&sb, 0); - hfind = _findfirst(sb.buffer, &info); - if (hfind != -1) { - do { - if (strcmp(".",info.name) && strcmp("..",info.name)) { - lua_pushstring(L, info.name); - lua_rawseti(L, -2, ++k); - } - } while ( _findnext(hfind, &info) != -1 ); - _findclose(hfind); - } - sbfree(&sb); - -#else - - DIR *dirp; - struct dirent *d; - dirp = opendir(s); - if (dirp) { - lua_createtable(L, 0, 0); - while ((d = readdir(dirp))) { - int n = NAMLEN(d); - lua_pushlstring(L, d->d_name, n); - lua_rawseti(L, -2, ++k); - } - closedir(dirp); - } else - lua_pushnil(L); - -#endif - - return 1; -} - - -/* ------------------------------------------------------ */ -/* tmpname */ - - -static const char *tmpnames_key = "tmpname_sentinel"; - -struct tmpname_s { - struct tmpname_s *next; - char tmp[4]; -}; - -static int -gc_tmpname(lua_State *L) -{ - if (lua_isuserdata(L, -1)) - { - struct tmpname_s **pp = (struct tmpname_s **)lua_touserdata(L, -1); - while (pp && *pp) - { - struct tmpname_s *p = *pp; - *pp = p->next; - remove(p->tmp); - free(p); - } - } - return 0; - -} - -static void -add_tmpname(lua_State *L, const char *tmp) -{ - struct tmpname_s **pp = 0; - lua_pushlightuserdata(L, (void*)tmpnames_key); - lua_rawget(L, LUA_REGISTRYINDEX); - if (lua_isuserdata(L, -1)) - { - pp = (struct tmpname_s **)lua_touserdata(L, -1); - lua_pop(L, 1); - } - else - { - lua_pop(L, 1); - /* create sentinel */ - lua_pushlightuserdata(L, (void*)tmpnames_key); - pp = (struct tmpname_s **)lua_newuserdata(L, sizeof(void*)); - pp[0] = 0; - lua_createtable(L, 0, 1); - lua_pushcfunction(L, gc_tmpname); - lua_setfield(L,-2,"__gc"); - lua_setmetatable(L, -2); - lua_rawset(L, LUA_REGISTRYINDEX); - } - while (pp && *pp) - { - struct tmpname_s *p = *pp; - if (!strcmp(p->tmp, tmp)) { - return; - } - pp = &(p->next); - } - if (pp) - { - int len = strlen(tmp); - struct tmpname_s *t = (struct tmpname_s*)malloc(len + sizeof(struct tmpname_s)); - if (t) - { - t->next = 0; - memcpy(t->tmp, tmp, len); - t->tmp[len] = 0; - *pp = t; - } - } -} - - -static int -lua_tmpname(lua_State *L) -{ - char *tmp; - int fd = -1; -#ifdef _WIN32 - tmp = _tempnam("c:/temp", "luatmp"); -#else - char *tempdir = getenv("TMPDIR"); - if (tempdir == NULL) { - tempdir = "/tmp"; - } - tmp = calloc(1, PATH_MAX); - snprintf(tmp, PATH_MAX, "%s/%sXXXXXXXX", tempdir, "luatmp"); - fd = mkstemp(tmp); - - if (fd == -1) { - free(tmp); - tmp = NULL; - } - else { - /* Stupid and unsafe thing but that's how this library wants to do it */ - close(fd); - } -#endif - if (tmp) - { - lua_pushstring(L, tmp); - add_tmpname(L, tmp); - free(tmp); - return 1; - } 
- else - { - lua_pushnil(L); - return 1; - } -} - - - -/* ------------------------------------------------------ */ -/* mkdir, rmdir */ - -static int -pushresult (lua_State *L, int i, const char *filename) { - int en = errno; - if (i) { - lua_pushboolean(L, 1); - return 1; - } - else { - lua_pushnil(L); - lua_pushfstring(L, "%s: %s", filename, strerror(en)); - lua_pushinteger(L, en); - return 3; - } -} - -static int -lua_mkdir(lua_State *L) -{ - int status = 0; - const char *s = luaL_checkstring(L, 1); - lua_pushcfunction(L, lua_mkdir); - lua_pushcfunction(L, lua_dirname); - lua_pushvalue(L, 1); - lua_call(L, 1, 1); - if (! dirp(L, -1)) - lua_call(L, 1, 3); -#ifdef _WIN32 - status = _mkdir(s); -#else - status = mkdir(s, 0777); -#endif - return pushresult(L, status == 0, s); -} - -static int -lua_rmdir(lua_State *L) -{ - const char *s = luaL_checkstring(L, 1); -#ifdef _WIN32 - int status = _rmdir(s); -#else - int status = rmdir(s); -#endif - return pushresult(L, status == 0, s); -} - - -/* ------------------------------------------------------ */ -/* uname */ - - -static int -lua_uname(lua_State *L) -{ -#if defined(_WIN32) - const char *name; - SYSTEM_INFO info; - lua_pushliteral(L, "Windows"); - name = getenv("COMPUTERNAME"); - lua_pushstring(L, name ? name : ""); - memset(&info, 0, sizeof(info)); - GetSystemInfo(&info); - if (info.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) - lua_pushliteral(L, "AMD64"); - else if (info.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_INTEL) - lua_pushliteral(L, "X86"); - else if (info.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_ARM) - lua_pushliteral(L, "ARM"); - else if (info.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_IA64) - lua_pushliteral(L, "IA64"); - else - lua_pushstring(L, ""); - return 3; -#else -# if defined(HAVE_SYS_UTSNAME_H) - struct utsname info; - if (uname(&info) >= 0) - { - lua_pushstring(L, info.sysname); - lua_pushstring(L, info.nodename); - lua_pushstring(L, info.machine); - return 3; - } -# endif - lua_pushstring(L, "Unknown"); - return 1; -#endif -} - -static int -lua_getregistryvalue(lua_State *L) -{ -#ifdef _WIN32 - static char *keynames[] = { - "HKEY_CLASSES_ROOT", - "HKEY_CURRENT_CONFIG", - "HKEY_CURRENT_USER", - "HKEY_LOCAL_MACHINE", - "HKEY_USERS", - NULL }; - static HKEY keys[] = { - HKEY_CLASSES_ROOT, - HKEY_CURRENT_CONFIG, - HKEY_CURRENT_USER, - HKEY_LOCAL_MACHINE, - HKEY_USERS - }; - - HKEY rkey = keys[ luaL_checkoption(L, 1, NULL, keynames) ]; - const char *subkey = luaL_checkstring(L, 2); - const char *value = luaL_checkstring(L, 3); - HKEY skey; - DWORD type; - DWORD len = 0; - char *data = NULL; - LONG res; - res = RegOpenKeyExA(rkey, subkey, 0, KEY_READ, &skey); - if (res != ERROR_SUCCESS) - { - lua_pushnil(L); - lua_pushinteger(L, res); - if (res == ERROR_FILE_NOT_FOUND) - lua_pushstring(L, "subkey not found"); - else if (res == ERROR_ACCESS_DENIED) - lua_pushstring(L, "subkey access denied"); - else - return 2; - return 3; - } - res = RegQueryValueExA(skey, value, NULL, &type, (LPBYTE)data, &len); - if (len > 0) - { - len += 8; - data = (char*)malloc(len); - if (!
data) - luaL_error(L, "out of memory"); - res = RegQueryValueExA(skey, value, NULL, &type, (LPBYTE)data, &len); - } - RegCloseKey(skey); - if (res != ERROR_SUCCESS) - { - if (data) - free(data); - lua_pushnil(L); - lua_pushinteger(L, res); - if (res == ERROR_FILE_NOT_FOUND) - lua_pushstring(L, "value not found"); - else if (res == ERROR_ACCESS_DENIED) - lua_pushstring(L, "value access denied"); - else - return 2; - return 3; - } - switch(type) - { - case REG_DWORD: - lua_pushinteger(L, (lua_Integer)*(const DWORD*)data); - if (data) - free(data); - return 1; - case REG_EXPAND_SZ: - if (data && len > 0) - { - if ((len = ExpandEnvironmentStrings(data, NULL, 0)) > 0) - { - char *buf = (char*)malloc(len + 8); - if (!buf) - luaL_error(L, "out of memory"); - len = ExpandEnvironmentStrings(data, buf, len+8); - free(data); - data = buf; - } - } - /* fall thru */ - case REG_SZ: - if (data && len > 0) - if (((const char*)data)[len-1] == 0) - len -= 1; - /* fall thru */ - case REG_BINARY: - if (data && len > 0) - lua_pushlstring(L, (const char*)data, (int)len); - else - lua_pushliteral(L, ""); - if (data) - free(data); - return 1; - /* unimplemented */ - case REG_QWORD: - case REG_MULTI_SZ: - default: - lua_pushnil(L); - lua_pushinteger(L, res); - lua_pushfstring(L, "getting registry type %d not implemented", type); - return 3; - } -#else - luaL_error(L, "This function exists only on windows"); - return 0; -#endif -} - -/* ------------------------------------------------------ */ -/* require (with global flag) */ - -#ifdef HAVE_DLOPEN -# define NEED_PATH_REQUIRE 1 -# include <dlfcn.h> -# ifndef RTLD_LAZY -# define RTLD_LAZY 1 -# endif -# ifndef RTLD_GLOBAL -# define RTLD_GLOBAL 0 -# endif -# define LL_LOAD(h,fname) h=dlopen(fname,RTLD_LAZY|RTLD_GLOBAL) -# define LL_SYM(h,sym) dlsym(h, sym) -#endif - -#ifdef _WIN32 -# define NEED_PATH_REQUIRE 1 -# include <windows.h> -# define LL_LOAD(h,fname) h=(void*)LoadLibraryA(fname) -# define LL_SYM(h,sym) GetProcAddress((HINSTANCE)h,sym) -#endif - -#if NEED_PATH_REQUIRE - -/* {{{ functions copied or derived from loadlib.c */ - -static int readable (const char *filename) -{ - FILE *f = fopen(filename, "r"); /* try to open file */ - if (f == NULL) return 0; /* open failed */ - fclose(f); - return 1; -} - -#if LUA_VERSION_NUM >= 502 /* LUA52 compatibility defs */ -#define LUA_PATHSEP ";" -#define PATHS_LUA_CLEANUP_DEFS 1 -#endif -static const char *pushnexttemplate (lua_State *L, const char *path) -{ - const char *l; - while (*path == *LUA_PATHSEP) path++; /* skip separators */ - if (*path == '\0') return NULL; /* no more templates */ - l = strchr(path, *LUA_PATHSEP); /* find next separator */ - if (l == NULL) l = path + strlen(path); - lua_pushlstring(L, path, l - path); /* template */ - return l; -} -#ifdef PATHS_LUA_CLEANUP_DEFS /* cleanup after yourself */ -#undef LUA_PATHSEP -#endif - -static const char *pushfilename (lua_State *L, const char *name) -{ - const char *path; - const char *filename; - lua_getglobal(L, "package"); - lua_getfield(L, -1, "cpath"); - lua_remove(L, -2); - if (!
(path = lua_tostring(L, -1))) - luaL_error(L, LUA_QL("package.cpath") " must be a string"); - lua_pushliteral(L, ""); - while ((path = pushnexttemplate(L, path))) { - filename = luaL_gsub(L, lua_tostring(L, -1), "?", name); - lua_remove(L, -2); - if (readable(filename)) - { /* stack: cpath errmsg filename */ - lua_remove(L, -3); - lua_remove(L, -2); - return lua_tostring(L, -1); - } - lua_pushfstring(L, "\n\tno file " LUA_QS, filename); - lua_remove(L, -2); /* remove file name */ - lua_concat(L, 2); /* add entry to possible error message */ - } - lua_pushfstring(L, "module " LUA_QS " not found", name); - lua_replace(L, -3); - lua_concat(L, 2); - lua_error(L); - return 0; -} - -/* functions copied or derived from loadlib.c }}} */ - -static int -path_require(lua_State *L) -{ - const char *filename; - lua_CFunction func; - void *handle; - const char *name = luaL_checkstring(L, 1); - lua_settop(L, 1); - lua_getfield(L, LUA_REGISTRYINDEX, "_LOADED"); /* index 2 */ - lua_getfield(L, 2, name); - if (lua_toboolean(L, -1)) - return 1; - filename = pushfilename(L, name); /* index 3 */ - LL_LOAD(handle, filename); - if (! handle) - luaL_error(L, "cannot load " LUA_QS, filename); - lua_pushfstring(L, "luaopen_%s", name); /* index 4 */ - func = (lua_CFunction)LL_SYM(handle, lua_tostring(L, -1)); - if (! func) - luaL_error(L, "no symbol " LUA_QS " in module " LUA_QS, - lua_tostring(L, -1), filename); - lua_pushboolean(L, 1); - lua_setfield(L, 2, name); - lua_pushcfunction(L, func); - lua_pushstring(L, name); - lua_call(L, 1, 1); - if (! lua_isnil(L, -1)) - lua_setfield(L, 2, name); - lua_getfield(L, 2, name); - return 1; -} - -#else - -/* fallback to calling require */ - -static int -path_require(lua_State *L) -{ - int narg = lua_gettop(L); - lua_getglobal(L, "require"); - lua_insert(L, 1); - lua_call(L, narg, 1); - return 1; -} - -#endif - - - - -/* ------------------------------------------------------ */ -/* register */ - - -static const struct luaL_Reg paths__ [] = { - {"filep", lua_filep}, - {"dirp", lua_dirp}, - {"basename", lua_basename}, - {"dirname", lua_dirname}, - {"extname", lua_extname}, - {"cwd", lua_cwd}, - {"concat", lua_concatfname}, - {"execdir", lua_execdir}, - {"dir", lua_dir}, - {"tmpname", lua_tmpname}, - {"mkdir", lua_mkdir}, - {"rmdir", lua_rmdir}, - {"uname", lua_uname}, - {"getregistryvalue", lua_getregistryvalue}, - {"require", path_require}, - {NULL, NULL} -}; - - -PATHS_API int -luaopen_libpaths(lua_State *L) -{ - lua_newtable(L); - lua_pushvalue(L, -1); - lua_setglobal(L, "paths"); -#if LUA_VERSION_NUM >= 502 - luaL_setfuncs(L, paths__, 0); -#else - luaL_register(L, NULL, paths__); -#endif - return 1; -} diff --git a/contrib/lua-torch/paths/paths.h.in b/contrib/lua-torch/paths/paths.h.in deleted file mode 100644 index fb5417a6c5..0000000000 --- a/contrib/lua-torch/paths/paths.h.in +++ /dev/null @@ -1,86 +0,0 @@ -/* -*- C -*- */ - -#include "lua.h" -#include "lauxlib.h" - -#include -#include -#include -#include - -#if defined(_WIN32) || defined(LUA_WIN) -# ifdef paths_EXPORTS -# define PATHS_API __declspec(dllexport) -# else -# define PATHS_API __declspec(dllimport) -# endif -#else -# define PATHS_API /**/ -#endif - - -#if defined(_WIN32) || defined(LUA_WIN) - -# include -# include -# include -# include -# include -# include -# include -# include -# include - -#else - -#cmakedefine HAVE_DIRENT_H 1 -#cmakedefine HAVE_FCNTL_H 1 -#cmakedefine HAVE_UNISTD_H 1 -#cmakedefine HAVE_TIME_H 1 -#cmakedefine HAVE_SYS_TIME_H 1 -#cmakedefine HAVE_SYS_NDIR_H 1 -#cmakedefine HAVE_SYS_DIR_H 
1 -#cmakedefine HAVE_SYS_UTSNAME_H 1 -#cmakedefine HAVE_NDIR_H 1 -#cmakedefine HAVE_GETCWD 1 -#cmakedefine HAVE_DLOPEN 1 - -# include -# include -# include -# if HAVE_FCNTL_H -# include <fcntl.h> -# endif -# if HAVE_UNISTD_H -# include <unistd.h> -# endif -# if HAVE_SYS_TIME_H -# include <sys/time.h> -# endif -# if HAVE_SYS_UTSNAME_H -# include <sys/utsname.h> -# endif -# if HAVE_TIME_H -# include <time.h> -# endif -# ifdef HAVE_UNISTD_H -# include <unistd.h> -# endif -# ifdef HAVE_DIRENT_H -# include <dirent.h> -# define NAMLEN(dirent) strlen((dirent)->d_name) -# else -# define dirent direct -# define NAMLEN(dirent) (dirent)->d_namlen -# if HAVE_SYS_NDIR_H -# include <sys/ndir.h> -# endif -# if HAVE_SYS_DIR_H -# include <sys/dir.h> -# endif -# if HAVE_NDIR_H -# include <ndir.h> -# endif -# endif - -#endif diff --git a/contrib/lua-torch/torch7/CMakeLists.txt b/contrib/lua-torch/torch7/CMakeLists.txt deleted file mode 100644 index cb73f60960..0000000000 --- a/contrib/lua-torch/torch7/CMakeLists.txt +++ /dev/null @@ -1,50 +0,0 @@ -LIST(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") - -IF (NOT MSVC) - IF (MINGW) - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror=format") - ELSE() - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror=implicit-function-declaration -Werror=format") - ENDIF(MINGW) -ENDIF(NOT MSVC) -IF (WITH_OPENMP) - FIND_PACKAGE(OpenMP) - IF(OPENMP_FOUND) - MESSAGE(STATUS "Compiling with OpenMP support") - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") - SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}") - ENDIF(OPENMP_FOUND) -ENDIF (WITH_OPENMP) - -# Includes -INCLUDE(TorchPaths) -INCLUDE(TorchPathsInit) -INCLUDE(TorchPackage) -INCLUDE(TorchWrap) -INCLUDE(TorchExports) - -# Torch libraries -ADD_SUBDIRECTORY(lib) - -CONFIGURE_FILE(paths.lua.in "${CMAKE_CURRENT_BINARY_DIR}/paths.lua") - -INCLUDE_DIRECTORIES(BEFORE "${CMAKE_CURRENT_SOURCE_DIR}/lib/TH") -INCLUDE_DIRECTORIES(BEFORE "${CMAKE_CURRENT_BINARY_DIR}/lib/TH") -INCLUDE_DIRECTORIES(BEFORE "${CMAKE_CURRENT_SOURCE_DIR}/lib/luaT") -LINK_DIRECTORIES("${LUA_LIBDIR}") - -SET(src DiskFile.c File.c MemoryFile.c PipeFile.c Storage.c Tensor.c Timer.c utils.c init.c TensorOperator.c TensorMath.c random.c Generator.c) -SET(luasrc init.lua File.lua Tensor.lua CmdLine.lua FFInterface.lua Tester.lua TestSuite.lua ${CMAKE_CURRENT_BINARY_DIR}/paths.lua test/test.lua) - -# Necessary to generate the wrappers -#ADD_TORCH_WRAP(tensormathwrap TensorMath.lua) -#ADD_TORCH_WRAP(randomwrap random.lua) - -ADD_TORCH_PACKAGE(torch "${src}" "${luasrc}") - -TARGET_LINK_LIBRARIES(torch luaT TH) - -IF(LUALIB) - TARGET_LINK_LIBRARIES(torch ${LUALIB}) -ENDIF() diff --git a/contrib/lua-torch/torch7/COPYRIGHT.txt b/contrib/lua-torch/torch7/COPYRIGHT.txt deleted file mode 100644 index bc002b78ab..0000000000 --- a/contrib/lua-torch/torch7/COPYRIGHT.txt +++ /dev/null @@ -1,36 +0,0 @@ -Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) -Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) -Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) -Copyright (c) 2011-2013 NYU (Clement Farabet) -Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) -Copyright (c) 2006 Idiap Research Institute (Samy Bengio) -Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1.
Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the names of Deepmind Technologies, NYU, NEC Laboratories America - and IDIAP Research Institute nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. diff --git a/contrib/lua-torch/torch7/CmdLine.lua b/contrib/lua-torch/torch7/CmdLine.lua deleted file mode 100644 index 643635069e..0000000000 --- a/contrib/lua-torch/torch7/CmdLine.lua +++ /dev/null @@ -1,269 +0,0 @@ -local CmdLine = torch.class('torch.CmdLine') - -local function strip(str) - return string.match(str, '%-*(.*)') -end - -local function pad(str, sz) - return str .. string.rep(' ', sz-#str) -end - -function CmdLine:error(msg) - print('') - io.stderr:write(msg) - print('') - self:help() - os.exit(1) -end - -function CmdLine:__readArgument__(params, arg, i, nArgument) - local argument = self.arguments[nArgument] - local value = arg[i] - - if nArgument > #self.arguments then - self:error('invalid argument: ' .. value) - end - if argument.type and type(value) ~= argument.type then - self:error('invalid argument type for argument ' .. argument.key .. ' (should be ' .. argument.type .. ')') - end - params[strip(argument.key)] = value - return 1 -end - -function CmdLine:__readOption__(params, arg, i) - local key = arg[i] - local option = self.options[key] - if not option then - self:error('unknown option ' .. key) - end - - if option.type and option.type == 'boolean' then - params[strip(key)] = not option.default - return 1 - else - local value = arg[i+1] - if not value then - self:error('missing argument for option ' .. key) - end - if not option.type or option.type == 'string' then - elseif option.type == 'number' then - value = tonumber(value) - else - self:error('unknown required option type ' .. option.type) - end - if not value then - self:error('invalid type for option ' .. key .. ' (should be ' .. option.type .. 
')') - end - params[strip(key)] = value - return 2 - end -end - -function CmdLine:__init(argseparator_,keyseparator_) - self.argseparator = argseparator_ or ',' - self.keyseparator = keyseparator_ or '=' - self.options = {} - self.arguments = {} - self.helplines = {} - self.dateformat = nil - self.silentio = false -end - -function CmdLine:silent() - self.silentio = true -end - -function CmdLine:addTime(name, format) - format = format or '%Y-%m-%d %H:%M:%S' - if type(format) ~= 'string' then - error('Argument has to be string') - end - if name ~= nil then - name = '[' .. name .. ']: ' - else - name = '' - end - self.dateformat = format .. name -end - - -function CmdLine:argument(key, help, _type_) - table.insert(self.arguments, {key=key, help=help, type=_type_}) - table.insert(self.helplines, self.arguments[#self.arguments]) -end - -function CmdLine:option(key, default, help, _type_) - if default == nil then - error('option ' .. key .. ' has no default value') - end - _type_ = _type_ or type(default) - if type(default) ~= _type_ then - error('option ' .. key .. ' has wrong default type value') - end - self.options[key] = {key=key, default=default, help=help, type=_type_} - table.insert(self.helplines, self.options[key]) -end - -function CmdLine:default() - local params = {} - for option,v in pairs(self.options) do - params[strip(option)] = v.default - end - return params -end - -function CmdLine:parse(arg) - local i = 1 - local params = self:default() - - local nArgument = 0 - - while i <= #arg do - if arg[i] == '-help' or arg[i] == '-h' or arg[i] == '--help' then - self:help(arg) - os.exit(0) - end - - if self.options[arg[i]] then - i = i + self:__readOption__(params, arg, i) - else - nArgument = nArgument + 1 - i = i + self:__readArgument__(params, arg, i, nArgument) - end - end - - if nArgument ~= #self.arguments then - self:error('not enough arguments') - end - - return params -end - -function CmdLine:string(prefix, params, ignore) - local arguments = {} - local options = {} - prefix = prefix or '' - - for k,v in pairs(params) do - if ignore[k] then - print('-- ignore option ' .. k) - elseif self.options['-' .. k] then - if v ~= self.options['-' .. k].default or ignore[k] == false then - if type(v) == 'boolean' then - if v then - v = 't' - else - v = 'f' - end - end - table.insert(options, k .. self.keyseparator .. v) - print(k,v,self.options['-' .. k].default) - end - else - local narg - for i=1,#self.arguments do - if strip(self.arguments[i].key) == k then - narg = i - end - end - if narg then - arguments[narg] = k .. self.keyseparator .. v - else - print('WARNING: unknown option/argument: ' .. k .. ' IGNORING for DIRECTORY NAME') - end - end - end - table.sort(options) - local str = table.concat(arguments, self.argseparator) - if str == '' then - str = table.concat(options, self.argseparator) - else - str = str .. self.argseparator .. table.concat(options, self.argseparator) - end - if str == '' then - return prefix - else - return prefix .. self.argseparator .. str - end -end - -local oprint = nil -function CmdLine:log(file, params) - local f = (io.type(file) == 'file' and file) or io.open(file, 'w') - oprint = oprint or print -- get the current print function lazily - function print(...) - local n = select("#", ...) - local arg = {...} - if not self.silentio then - oprint(...) 
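--[[ Typical CmdLine flow, for reference (the option names are made up):

       local cmd = torch.CmdLine()
       cmd:text('Example options:')
       cmd:option('-seed', 1, 'random seed')       -- type inferred from default
       cmd:option('-quiet', false, 'less output')  -- boolean: presence flips default
       cmd:argument('-input', 'input file')
       local params = cmd:parse(arg)  -- params.seed, params.quiet, params.input

     Boolean options take no value on the command line; every other option
     consumes the following token, converted according to its declared type. ]]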
- end - local str = {} - if self.dateformat then - table.insert(str, os.date(self.dateformat)) - end - for i=1,n do - table.insert(str,tostring(arg[i])) - end - table.insert(str,'\n') - f:write(table.concat(str,' ')) - f:flush() - end - print('[program started on ' .. os.date() .. ']') - print('[command line arguments]') - if params then - for k,v in pairs(params) do - print(k,v) - end - end - print('[----------------------]') -end - -function CmdLine:text(txt) - txt = txt or '' - assert(type(txt) == 'string') - table.insert(self.helplines, txt) -end - -function CmdLine:help(arg) - io.write('Usage: ') - if arg then io.write(arg[0] .. ' ') end - io.write('[options]') - for i=1,#self.arguments do - io.write(' <' .. strip(self.arguments[i].key) .. '>') - end - io.write('\n') - - -- first pass to compute max length - local optsz = 0 - for _,option in ipairs(self.helplines) do - if type(option) == 'table' then - if option.default ~= nil then -- it is an option - if #option.key > optsz then - optsz = #option.key - end - else -- it is an argument - if #strip(option.key)+2 > optsz then - optsz = #strip(option.key)+2 - end - end - end - end - - -- second pass to print - for _,option in ipairs(self.helplines) do - if type(option) == 'table' then - io.write(' ') - if option.default ~= nil then -- it is an option - io.write(pad(option.key, optsz)) - if option.help then io.write(' ' .. option.help) end - io.write(' [' .. tostring(option.default) .. ']') - else -- it is an argument - io.write(pad('<' .. strip(option.key) .. '>', optsz)) - if option.help then io.write(' ' .. option.help) end - end - else - io.write(option) -- just some additional help - end - io.write('\n') - end -end diff --git a/contrib/lua-torch/torch7/DiskFile.c b/contrib/lua-torch/torch7/DiskFile.c deleted file mode 100644 index c50b74f950..0000000000 --- a/contrib/lua-torch/torch7/DiskFile.c +++ /dev/null @@ -1,103 +0,0 @@ -#include "general.h" - -static int torch_DiskFile_new(lua_State *L) -{ - const char *name = luaL_checkstring(L, 1); - const char *mode = luaL_optstring(L, 2, "r"); - int isQuiet = luaT_optboolean(L, 3, 0); - THFile *self = THDiskFile_new(name, mode, isQuiet); - - luaT_pushudata(L, self, "torch.DiskFile"); - return 1; -} - -static int torch_DiskFile_free(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.DiskFile"); - THFile_free(self); - return 0; -} - -static int torch_DiskFile_isLittleEndianCPU(lua_State *L) -{ - lua_pushboolean(L, THDiskFile_isLittleEndianCPU()); - return 1; -} - -static int torch_DiskFile_isBigEndianCPU(lua_State *L) -{ - lua_pushboolean(L, !THDiskFile_isLittleEndianCPU()); - return 1; -} - -static int torch_DiskFile_nativeEndianEncoding(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.DiskFile"); - THDiskFile_nativeEndianEncoding(self); - lua_settop(L, 1); - return 1; -} - -static int torch_DiskFile_littleEndianEncoding(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.DiskFile"); - THDiskFile_littleEndianEncoding(self); - lua_settop(L, 1); - return 1; -} - -static int torch_DiskFile_bigEndianEncoding(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.DiskFile"); - THDiskFile_bigEndianEncoding(self); - lua_settop(L, 1); - return 1; -} - -static int torch_DiskFile_longSize(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.DiskFile"); - THDiskFile_longSize(self, lua_tointeger(L, 2)); - lua_settop(L, 1); - return 1; -} - -static int torch_DiskFile_noBuffer(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.DiskFile"); 
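/* The Lua-facing side of these bindings, sketched (the file name 'data.bin'
   is illustrative):

     local f = torch.DiskFile('data.bin', 'w')  -- mode defaults to 'r'
     f:binary():littleEndianEncoding()          -- setters chain
     f:writeInt(42)
     f:close()

   Chaining works because each setter above ends with lua_settop(L, 1),
   leaving the file object itself as the return value. */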
- THDiskFile_noBuffer(self); - lua_settop(L, 1); - return 1; -} - -static int torch_DiskFile___tostring__(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.DiskFile"); - lua_pushfstring(L, "torch.DiskFile on <%s> [status: %s -- mode %c%c]", - THDiskFile_name(self), - (THFile_isOpened(self) ? "open" : "closed"), - (THFile_isReadable(self) ? 'r' : ' '), - (THFile_isWritable(self) ? 'w' : ' ')); - - return 1; -} -static const struct luaL_Reg torch_DiskFile__ [] = { - {"isLittleEndianCPU", torch_DiskFile_isLittleEndianCPU}, - {"isBigEndianCPU", torch_DiskFile_isBigEndianCPU}, - {"nativeEndianEncoding", torch_DiskFile_nativeEndianEncoding}, - {"littleEndianEncoding", torch_DiskFile_littleEndianEncoding}, - {"bigEndianEncoding", torch_DiskFile_bigEndianEncoding}, - {"longSize", torch_DiskFile_longSize}, - {"noBuffer", torch_DiskFile_noBuffer}, - {"__tostring__", torch_DiskFile___tostring__}, - {NULL, NULL} -}; - -void torch_DiskFile_init(lua_State *L) -{ - luaT_newmetatable(L, "torch.DiskFile", "torch.File", - torch_DiskFile_new, torch_DiskFile_free, NULL); - - luaT_setfuncs(L, torch_DiskFile__, 0); - lua_pop(L, 1); -} diff --git a/contrib/lua-torch/torch7/FFInterface.lua b/contrib/lua-torch/torch7/FFInterface.lua deleted file mode 100644 index cb8bd3365b..0000000000 --- a/contrib/lua-torch/torch7/FFInterface.lua +++ /dev/null @@ -1,222 +0,0 @@ --- if this causes issues, you may need to: --- luarocks remove --force ffi --- and follow instructions to install --- https://github.com/facebook/luaffifb -local ok, ffi = pcall(require, 'ffi') - -local function checkArgument(condition, fn, ud, msg, level) - local level = level or 3 - if not condition then - error("bad argument #" .. ud .. " to '" .. fn .. "' (" .. msg .. ")", level) - end -end - -local function checkArgumentType(expected, actual, fn, ud, level) - local level = level or 3 - if expected ~= actual then - checkArgument(false, fn, ud, expected .. " expected, got " .. actual, level + 1) - end -end - -if ok then - - local Real2real = { - Byte='unsigned char', - Char='char', - Short='short', - Int='int', - Long='long', - Float='float', - Double='double', - Half='THHalf' - } - - -- Allocator - ffi.cdef[[ -typedef struct THAllocator { - void* (*malloc)(void*, ptrdiff_t); - void* (*realloc)(void*, void*, ptrdiff_t); - void (*free)(void*, void*); -} THAllocator; -]] - - -- Half - ffi.cdef[[ -typedef struct { - unsigned short x; -} __THHalf; -typedef __THHalf THHalf; -]] - - -- Storage - for Real, real in pairs(Real2real) do - - local cdefs = [[ -typedef struct THRealStorage -{ - real *data; - ptrdiff_t size; - int refcount; - char flag; - THAllocator *allocator; - void *allocatorContext; -} THRealStorage; -]] - cdefs = cdefs:gsub('Real', Real):gsub('real', real) - ffi.cdef(cdefs) - - local Storage = torch.getmetatable(string.format('torch.%sStorage', Real)) - local Storage_tt = ffi.typeof('TH' .. Real .. 
'Storage**') - - rawset(Storage, - "cdata", - function(self) - return Storage_tt(self)[0] - end) - - rawset(Storage, - "data", - function(self) - return Storage_tt(self)[0].data - end) - end - - -- Tensor - for Real, real in pairs(Real2real) do - - local cdefs = [[ -typedef struct THRealTensor -{ - long *size; - long *stride; - int nDimension; - - THRealStorage *storage; - ptrdiff_t storageOffset; - int refcount; - - char flag; - -} THRealTensor; -]] - cdefs = cdefs:gsub('Real', Real):gsub('real', real) - ffi.cdef(cdefs) - - local Tensor_type = string.format('torch.%sTensor', Real) - local Tensor = torch.getmetatable(Tensor_type) - local Tensor_tt = ffi.typeof('TH' .. Real .. 'Tensor**') - - rawset(Tensor, - "cdata", - function(self) - if not self then return nil; end - return Tensor_tt(self)[0] - end) - - rawset(Tensor, - "data", - function(self) - if not self then return nil; end - self = Tensor_tt(self)[0] - return self.storage ~= nil and self.storage.data + self.storageOffset or nil - end) - - -- faster apply (contiguous case) - if Tensor_type ~= 'torch.HalfTensor' then - local apply = Tensor.apply - rawset(Tensor, - "apply", - function(self, func) - if self:isContiguous() and self.data then - local self_d = self:data() - for i=0,self:nElement()-1 do - local res = func(tonumber(self_d[i])) -- tonumber() required for long... - if res then - self_d[i] = res - end - end - return self - else - return apply(self, func) - end - end) - - -- faster map (contiguous case) - local map = Tensor.map - rawset(Tensor, - "map", - function(self, src, func) - checkArgument(torch.isTensor(src), "map", 1, "tensor expected") - checkArgumentType(self:type(), src:type(), "map", 1) - - if self:isContiguous() and src:isContiguous() and self.data and src.data then - local self_d = self:data() - local src_d = src:data() - assert(src:nElement() == self:nElement(), 'size mismatch') - for i=0,self:nElement()-1 do - local res = func(tonumber(self_d[i]), tonumber(src_d[i])) -- tonumber() required for long... - if res then - self_d[i] = res - end - end - return self - else - return map(self, src, func) - end - end) - - -- faster map2 (contiguous case) - local map2 = Tensor.map2 - rawset(Tensor, - "map2", - function(self, src1, src2, func) - checkArgument(torch.isTensor(src1), "map", 1, "tensor expected") - checkArgument(torch.isTensor(src2), "map", 2, "tensor expected") - checkArgumentType(self:type(), src1:type(), "map", 1) - checkArgumentType(self:type(), src2:type(), "map", 2) - - if self:isContiguous() and src1:isContiguous() and src2:isContiguous() and self.data and src1.data and src2.data then - local self_d = self:data() - local src1_d = src1:data() - local src2_d = src2:data() - assert(src1:nElement() == self:nElement(), 'size mismatch') - assert(src2:nElement() == self:nElement(), 'size mismatch') - for i=0,self:nElement()-1 do - local res = func(tonumber(self_d[i]), tonumber(src1_d[i]), tonumber(src2_d[i])) -- tonumber() required for long... 
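--[[ The raw-pointer access these fast paths rely on is also usable
     directly; a small sketch:

       local t = torch.DoubleTensor(4):zero()
       local p = t:data()  -- FFI pointer to the first element
       p[0] = 1.5          -- zero-based C indexing, no bounds checks
       print(t[1])         -- 1.5: same memory as Lua's one-based view

     torch.data(t, true) instead returns the address as a number (an
     intptr_t cast), which is handy for cheap identity comparisons. ]]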
- if res then - self_d[i] = res - end - end - return self - else - return map2(self, src1, src2, func) - end - end) - end - end - - -- torch.data - -- will fail if :data() is not defined - function torch.data(self, asnumber) - if not self then return nil; end - local data = self:data() - if asnumber then - return ffi.cast('intptr_t', data) - else - return data - end - end - - -- torch.cdata - -- will fail if :cdata() is not defined - function torch.cdata(self, asnumber) - if not self then return nil; end - local cdata = self:cdata() - if asnumber then - return ffi.cast('intptr_t', cdata) - else - return cdata - end - end - -end diff --git a/contrib/lua-torch/torch7/File.c b/contrib/lua-torch/torch7/File.c deleted file mode 100644 index e07bc46669..0000000000 --- a/contrib/lua-torch/torch7/File.c +++ /dev/null @@ -1,207 +0,0 @@ -#include "general.h" -#include "THFile.h" -#include "luaT.h" - -#define IMPLEMENT_TORCH_FILE_FLAG(NAME) \ - static int torch_File_##NAME(lua_State *L) \ - { \ - THFile *self = luaT_checkudata(L, 1, "torch.File"); \ - lua_pushboolean(L, THFile_##NAME(self)); \ - return 1; \ - } - -IMPLEMENT_TORCH_FILE_FLAG(isQuiet) -IMPLEMENT_TORCH_FILE_FLAG(isReadable) -IMPLEMENT_TORCH_FILE_FLAG(isWritable) -IMPLEMENT_TORCH_FILE_FLAG(isBinary) -IMPLEMENT_TORCH_FILE_FLAG(isAutoSpacing) -IMPLEMENT_TORCH_FILE_FLAG(hasError) - -#define IMPLEMENT_TORCH_FILE_FUNC(NAME) \ - static int torch_File_##NAME(lua_State *L) \ - { \ - THFile *self = luaT_checkudata(L, 1, "torch.File"); \ - THFile_##NAME(self); \ - lua_settop(L, 1); \ - return 1; \ - } - -IMPLEMENT_TORCH_FILE_FUNC(binary) -IMPLEMENT_TORCH_FILE_FUNC(ascii) -IMPLEMENT_TORCH_FILE_FUNC(autoSpacing) -IMPLEMENT_TORCH_FILE_FUNC(noAutoSpacing) -IMPLEMENT_TORCH_FILE_FUNC(quiet) -IMPLEMENT_TORCH_FILE_FUNC(pedantic) -IMPLEMENT_TORCH_FILE_FUNC(clearError) - -IMPLEMENT_TORCH_FILE_FUNC(synchronize) - -static int torch_File_seek(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.File"); - ptrdiff_t position = luaL_checkinteger(L, 2)-1; - // >= 0 because it has 1 already subtracted - THArgCheck(position >= 0, 2, "position has to be greater than 0!"); - THFile_seek(self, (size_t)position); - lua_settop(L, 1); - return 1; -} - -IMPLEMENT_TORCH_FILE_FUNC(seekEnd) - -static int torch_File_position(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.File"); - lua_pushnumber(L, THFile_position(self)+1); - return 1; -} - -IMPLEMENT_TORCH_FILE_FUNC(close) - -#define IMPLEMENT_TORCH_FILE_RW(TYPEC, TYPE) \ - static int torch_File_read##TYPEC(lua_State *L) \ - { \ - THFile *self = luaT_checkudata(L, 1, "torch.File"); \ - int narg = lua_gettop(L); \ - \ - if(narg == 1) \ - { \ - lua_pushnumber(L, THFile_read##TYPEC##Scalar(self)); \ - return 1; \ - } \ - else if(narg == 2) \ - { \ - if(lua_isnumber(L, 2)) \ - { \ - ptrdiff_t size = lua_tonumber(L, 2); \ - ptrdiff_t nread; \ - \ - TH##TYPEC##Storage *storage = TH##TYPEC##Storage_newWithSize(size); \ - luaT_pushudata(L, storage, "torch." #TYPEC "Storage"); \ - nread = THFile_read##TYPEC(self, storage); \ - if(nread != size) \ - TH##TYPEC##Storage_resize(storage, nread); \ - return 1; \ - } \ - else if(luaT_toudata(L, 2, "torch." #TYPEC "Storage")) \ - { \ - TH##TYPEC##Storage *storage = luaT_toudata(L, 2, "torch." 
#TYPEC "Storage"); \ - lua_pushnumber(L, THFile_read##TYPEC(self, storage)); \ - return 1; \ - } \ - } \ - \ - luaL_error(L, "nothing, number, or " #TYPEC "Storage expected"); \ - return 0; \ - } \ - \ - static int torch_File_write##TYPEC(lua_State *L) \ - { \ - THFile *self = luaT_checkudata(L, 1, "torch.File"); \ - int narg = lua_gettop(L); \ - \ - if(narg == 2) \ - { \ - if(lua_isnumber(L, 2)) \ - { \ - TYPE value = lua_tonumber(L, 2); \ - THFile_write##TYPEC##Scalar(self, (TYPE)value); \ - return 0; \ - } \ - else if(luaT_toudata(L, 2, "torch." #TYPEC "Storage")) \ - { \ - TH##TYPEC##Storage *storage = luaT_toudata(L, 2, "torch." #TYPEC "Storage"); \ - lua_pushnumber(L, THFile_write##TYPEC(self, storage)); \ - return 1; \ - } \ - } \ - \ - luaL_error(L, "number, or " #TYPEC "Storage expected"); \ - return 0; \ - } - - -IMPLEMENT_TORCH_FILE_RW(Byte, unsigned char) -IMPLEMENT_TORCH_FILE_RW(Char, char) -IMPLEMENT_TORCH_FILE_RW(Short, short) -IMPLEMENT_TORCH_FILE_RW(Int, int) -IMPLEMENT_TORCH_FILE_RW(Long, long) -IMPLEMENT_TORCH_FILE_RW(Float, float) -IMPLEMENT_TORCH_FILE_RW(Double, double) - -static int torch_File_readString(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.File"); - const char *format = luaL_checkstring(L, 2); - char *str; - ptrdiff_t size; - - size = THFile_readStringRaw(self, format, &str); - lua_pushlstring(L, str, size); - THFree(str); - - return 1; -} - -static int torch_File_writeString(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.File"); - const char *str = NULL; - size_t size; - - luaL_checktype(L, 2, LUA_TSTRING); - str = lua_tolstring(L, 2, &size); - lua_pushnumber(L, THFile_writeStringRaw(self, str, size)); - return 1; -} - -static const struct luaL_Reg torch_File__ [] = { - {"isQuiet", torch_File_isQuiet}, - {"isReadable", torch_File_isReadable}, - {"isWritable", torch_File_isWritable}, - {"isBinary", torch_File_isBinary}, - {"isAutoSpacing", torch_File_isAutoSpacing}, - {"hasError", torch_File_hasError}, - {"binary", torch_File_binary}, - {"ascii", torch_File_ascii}, - {"autoSpacing", torch_File_autoSpacing}, - {"noAutoSpacing", torch_File_noAutoSpacing}, - {"quiet", torch_File_quiet}, - {"pedantic", torch_File_pedantic}, - {"clearError", torch_File_clearError}, - - /* DEBUG: CHECK DISK FREE & READ/WRITE STRING*/ - - {"readByte", torch_File_readByte}, - {"readChar", torch_File_readChar}, - {"readShort", torch_File_readShort}, - {"readInt", torch_File_readInt}, - {"readLong", torch_File_readLong}, - {"readFloat", torch_File_readFloat}, - {"readDouble", torch_File_readDouble}, - {"readString", torch_File_readString}, - - {"writeByte", torch_File_writeByte}, - {"writeChar", torch_File_writeChar}, - {"writeShort", torch_File_writeShort}, - {"writeInt", torch_File_writeInt}, - {"writeLong", torch_File_writeLong}, - {"writeFloat", torch_File_writeFloat}, - {"writeDouble", torch_File_writeDouble}, - {"writeString", torch_File_writeString}, - - {"synchronize", torch_File_synchronize}, - {"seek", torch_File_seek}, - {"seekEnd", torch_File_seekEnd}, - {"position", torch_File_position}, - {"close", torch_File_close}, - - {NULL, NULL} -}; - -void torch_File_init(lua_State *L) -{ - luaT_newmetatable(L, "torch.File", NULL, NULL, NULL, NULL); - luaT_setfuncs(L, torch_File__, 0); - lua_pop(L, 1); -} diff --git a/contrib/lua-torch/torch7/File.lua b/contrib/lua-torch/torch7/File.lua deleted file mode 100644 index 62249a361e..0000000000 --- a/contrib/lua-torch/torch7/File.lua +++ /dev/null @@ -1,454 +0,0 @@ -local File = 
torch.getmetatable('torch.File') - -function File:writeBool(value) - if value then - self:writeInt(1) - else - self:writeInt(0) - end -end - -function File:readBool() - return (self:readInt() == 1) -end - -local TYPE_NIL = 0 -local TYPE_NUMBER = 1 -local TYPE_STRING = 2 -local TYPE_TABLE = 3 -local TYPE_TORCH = 4 -local TYPE_BOOLEAN = 5 -local TYPE_FUNCTION = 6 -local TYPE_RECUR_FUNCTION = 8 -local LEGACY_TYPE_RECUR_FUNCTION = 7 - --- Lua 5.2 compatibility -local loadstring = loadstring or load - -function File:isWritableObject(object) - local typename = type(object) - local typeidx - if type(object) ~= 'boolean' and not object then - typeidx = TYPE_NIL - elseif torch.typename(object) and torch.factory(torch.typename(object)) then - typeidx = TYPE_TORCH - elseif typename == 'table' then - typeidx = TYPE_TABLE - elseif typename == 'number' then - typeidx = TYPE_NUMBER - elseif typename == 'string' then - typeidx = TYPE_STRING - elseif typename == 'boolean' then - typeidx = TYPE_BOOLEAN - elseif typename == 'function' and pcall(string.dump, object) then - typeidx = TYPE_RECUR_FUNCTION - end - return typeidx -end - -function File:referenced(ref) - -- we use an environment to keep a record of written objects - if not torch.getenv(self).writeObjects then - torch.setenv(self, { - writeObjects={}, writeObjectsRef={}, - readObjects={}, - objectNameStack={}, - upvalueRefToId={}, upvalueIdToClosure={}, - }) - end - local env = torch.getenv(self) - env.force = not ref - torch.setenv(self,env) - return self -end - -function File:isReferenced() - -- if no environment, then no forcing setup yet - if not torch.getenv(self).writeObjects then - return true - end - local env = torch.getenv(self) - return not env.force -end - -local function getmetamethod(obj, name) - local func - local status - - -- check getmetatable(obj).__name or - -- check getmetatable(obj).name - status, func = pcall( - function() - -- note that sometimes the metatable is hidden - -- we get it for sure through the torch type system - local mt = torch.getmetatable(torch.typename(obj)) - if mt then - return mt['__' .. name] or mt[name] - end - end - ) - if status and type(func) == 'function' then - return func - end -end - -local UPVALUES_TOKEN = {} -- unique object -local function formatStack(objectNameStack) - -- Format object name stack skipping UPVALUES_TOKEN and upvalue index - local parts = {} - for i, v in ipairs(objectNameStack) do - if v ~= UPVALUES_TOKEN and objectNameStack[i-1] ~= UPVALUES_TOKEN then - table.insert(parts, v) - end - end - return table.concat(parts, '.') -end - -function File:writeObject(object, debugname, hook) - -- define a default hook function if not provided - hook = hook or function(object) return object end - -- we use an environment to keep a record of written objects - if not torch.getenv(self).writeObjects then - torch.setenv(self, { - writeObjects={}, writeObjectsRef={}, - readObjects={}, - objectNameStack={}, - upvalueRefToId={}, upvalueIdToClosure={}, - }) - end - -- That guy is used for references' book-keeping - local sobject = object - -- That guy is the object that is actually persisted - -- hook(object) can be used to modify the object before writing it to the file. - -- Useful for serializing objects under a config - -- that we want to deserialize safely under another config. - -- (e.g. Cuda to Float tensors, cudnn to nn, ...) 
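   --[[ For example, a hook that downgrades GPU tensors before saving could
        look like this (torch.CudaTensor and :float() come from the separate
        cutorch package, so this is only a sketch):

          local function toCPU(obj)
             if torch.typename(obj) == 'torch.CudaTensor' then
                return obj:float()
             end
             return obj
          end
          file:writeObject(model, nil, toCPU)

        Because writeObject passes the hook down on every recursive call,
        one function covers tensors nested anywhere inside tables. ]]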
- object = hook(object) - local force = torch.getenv(self).force - - -- if nil object, only write the type and return - if type(object) ~= 'boolean' and not object then - self:writeInt(TYPE_NIL) - return - end - - local objectNameStack = torch.getenv(self).objectNameStack - table.insert(objectNameStack, debugname or '') - - -- check the type we are dealing with - local typeidx = self:isWritableObject(object) - if not typeidx then - error(string.format('Unwritable object <%s> at %s', type(object), formatStack(objectNameStack))) - end - self:writeInt(typeidx) - - if typeidx == TYPE_NUMBER then - self:writeDouble(object) - elseif typeidx == TYPE_BOOLEAN then - self:writeBool(object) - elseif typeidx == TYPE_STRING then - local stringStorage = torch.CharStorage():string(object) - self:writeInt(#stringStorage) - self:writeChar(stringStorage) - elseif typeidx == TYPE_TORCH or typeidx == TYPE_TABLE or typeidx == TYPE_RECUR_FUNCTION then - -- check it exists already (we look at the pointer!) - local objects = torch.getenv(self).writeObjects - local objectsRef = torch.getenv(self).writeObjectsRef - local index = objects[torch.pointer(sobject)] - - if index and (not force) then - -- if already exists, write only its index - self:writeInt(index) - else - -- else write the object itself - index = objects.nWriteObject or 0 - index = index + 1 - if not force then - objects[torch.pointer(sobject)] = index - objectsRef[object] = index -- we make sure the object is not going to disappear - end - self:writeInt(index) - objects.nWriteObject = index - if typeidx == TYPE_RECUR_FUNCTION then - local upvalueRefToId = torch.getenv(self).upvalueRefToId - -- Unique ID for each ref since lightuserdata are not serializable - local nextId = 1 - for _ in pairs(upvalueRefToId) do nextId=nextId+1 end - local upvalues = {} - local counter = 0 - while true do - counter = counter + 1 - local name,value = debug.getupvalue(object, counter) - if not name then break end - if name == '_ENV' then value = nil end - local id=nil - -- debug.upvalueid exists only for lua>=5.2 and luajit - if debug.upvalueid then - local upvalueRef = debug.upvalueid(object, counter) - if not upvalueRefToId[upvalueRef] then - upvalueRefToId[upvalueRef] = nextId - nextId = nextId + 1 - end - id = upvalueRefToId[upvalueRef] - end - table.insert(upvalues, {name=name, id=id, value=value}) - end - local dumped = string.dump(object) - local stringStorage = torch.CharStorage():string(dumped) - self:writeInt(#stringStorage) - self:writeChar(stringStorage) - self:writeObject(upvalues, UPVALUES_TOKEN, hook) - elseif typeidx == TYPE_TORCH then - local version = torch.CharStorage():string('V ' .. 
torch.version(object)) - local className = torch.CharStorage():string(torch.typename(object)) - self:writeInt(#version) - self:writeChar(version) - self:writeInt(#className) - self:writeChar(className) - local write = getmetamethod(object, 'write') - if write then - write(object, self) - elseif type(object) == 'table' then - local var = {} - for k,v in pairs(object) do - if self:isWritableObject(v) then - var[k] = v - else - print(string.format('$ Warning: cannot write object field <%s> of <%s> %s', k, torch.typename(object), formatStack(objectNameStack))) - end - end - self:writeObject(var, torch.typename(object), hook) - else - error(string.format('<%s> is a non-serializable Torch object %s', torch.typename(object), formatStack(objectNameStack))) - end - else -- it is a table - local size = 0; for k,v in pairs(object) do size = size + 1 end - self:writeInt(size) - for k,v in pairs(object) do - self:writeObject(k, nil, hook) - local name = (type(k) == 'string' or type(k) == 'number') and tostring(k) or nil - -- special case name for upvalues - if objectNameStack[#objectNameStack-1] == UPVALUES_TOKEN and - name == 'value' and type(object.name) == 'string' then - name = object.name - end - self:writeObject(v, name, hook) - end - end - end - else - error('Unwritable object') - end - table.remove(objectNameStack) -end - -function File:readObject() - -- we use an environment to keep a record of read objects - if not torch.getenv(self).writeObjects then - torch.setenv(self, { - writeObjects={}, writeObjectsRef={}, - readObjects={}, - objectNameStack={}, - upvalueRefToId={}, upvalueIdToClosure={}, - }) - end - - local force = torch.getenv(self).force - - -- read the typeidx - local typeidx = self:readInt() - - -- is it nil? - if typeidx == TYPE_NIL then - return nil - end - - if typeidx == TYPE_NUMBER then - return self:readDouble() - elseif typeidx == TYPE_BOOLEAN then - return self:readBool() - elseif typeidx == TYPE_STRING then - local size = self:readInt() - return self:readChar(size):string() - elseif typeidx == TYPE_FUNCTION then - local size = self:readInt() - local dumped = self:readChar(size):string() - local func, err = loadstring(dumped) - if not func then - io.stderr:write(string.format('Warning: Failed to load function from bytecode: %s', err)) - end - local upvalues = self:readObject() - for index,upvalue in ipairs(upvalues) do - debug.setupvalue(func, index, upvalue) - end - return func - elseif typeidx == TYPE_TABLE or typeidx == TYPE_TORCH or typeidx == TYPE_RECUR_FUNCTION or typeidx == LEGACY_TYPE_RECUR_FUNCTION then - -- read the index - local index = self:readInt() - - -- check it is loaded already - local objects = torch.getenv(self).readObjects - if objects[index] and not force then - return objects[index] - end - - -- otherwise read it - if typeidx == TYPE_RECUR_FUNCTION or typeidx == LEGACY_TYPE_RECUR_FUNCTION then - local size = self:readInt() - local dumped = self:readChar(size):string() - local func, err = loadstring(dumped) - if not func then - io.stderr:write(string.format('Warning: Failed to load function from bytecode: %s', err)) - end - if not force then - objects[index] = func - end - local upvalueIdToClosure = torch.getenv(self).upvalueIdToClosure - local upvalues = self:readObject() - for index,upvalue in ipairs(upvalues) do - if typeidx == LEGACY_TYPE_RECUR_FUNCTION then - debug.setupvalue(func, index, upvalue) - elseif upvalue.name == '_ENV' then - debug.setupvalue(func, index, _ENV) - else - debug.setupvalue(func, index, upvalue.value) - -- debug.upvaluejoin 
exists only for lua>=5.2 and luajit - if debug.upvaluejoin and upvalue.id then - if upvalueIdToClosure[upvalue.id] then - -- This upvalue is linked to another one - local otherClosure = upvalueIdToClosure[upvalue.id] - debug.upvaluejoin(func, index, otherClosure.func, otherClosure.index) - else - -- Save this closure for next time - upvalueIdToClosure[upvalue.id] = { - func = func, - index = index, - } - end - end - end - end - return func - elseif typeidx == TYPE_TORCH then - local version, className, versionNumber - version = self:readChar(self:readInt()):string() - versionNumber = tonumber(string.match(version, '^V (.*)$')) - if not versionNumber then - className = version - versionNumber = 0 -- file created before existence of versioning system - else - className = self:readChar(self:readInt()):string() - end - if not torch.factory(className) then - error(string.format('unknown Torch class <%s>', tostring(className))) - end - local object = torch.factory(className)(self) - if not force then - objects[index] = object - end - local read = getmetamethod(object, 'read') - if read then - read(object, self, versionNumber) - elseif type(object) == 'table' then - local var = self:readObject() - for k,v in pairs(var) do - object[k] = v - end - else - error(string.format('Cannot load object class <%s>', tostring(className))) - end - return object - else -- it is a table - local size = self:readInt() - local object = {} - if not force then - objects[index] = object - end - for i = 1,size do - local k = self:readObject() - local v = self:readObject() - object[k] = v - end - return object - end - else - error('unknown object') - end -end - --- simple helpers to save/load arbitrary objects/tables -function torch.save(filename, object, mode, referenced) - assert(mode == nil or mode == 'binary' or mode == 'ascii', '"binary" or "ascii" (or nil) expected for mode') - assert(referenced == nil or referenced == true or referenced == false, 'true or false (or nil) expected for referenced') - mode = mode or 'binary' - referenced = referenced == nil and true or referenced - local file = torch.DiskFile(filename, 'w') - file[mode](file) - file:referenced(referenced) - file:writeObject(object) - file:close() -end - -function torch.load(filename, mode, referenced) - assert(mode == 'binary' or mode == 'b32' or mode == 'b64' or - mode == nil or mode == 'ascii', - '"binary", "b32", "b64" or "ascii" (or nil) expected for mode') - assert(referenced == nil or referenced == true or referenced == false, - 'true or false (or nil) expected for referenced') - local longSize - if mode == 'b32' or mode == 'b64' then - longSize = tonumber(mode:match('%d+')) / 8 - mode = 'binary' - end - mode = mode or 'binary' - referenced = referenced == nil and true or referenced - local file = torch.DiskFile(filename, 'r') - file[mode](file) - file:referenced(referenced) - if longSize then file:longSize(longSize) end - local object = file:readObject() - file:close() - return object -end - --- simple helpers to serialize/deserialize arbitrary objects/tables -function torch.serialize(object, mode) - local storage = torch.serializeToStorage(object, mode) - return storage:string() -end - --- Serialize to a CharStorage, not a lua string. 
This avoids -function torch.serializeToStorage(object, mode) - mode = mode or 'binary' - local f = torch.MemoryFile() - f = f[mode](f) - f:writeObject(object) - local storage = f:storage() - -- the storage includes an extra NULL character: get rid of it - storage:resize(storage:size()-1) - f:close() - return storage -end - -function torch.deserializeFromStorage(storage, mode) - mode = mode or 'binary' - local tx = torch.CharTensor(storage) - local xp = torch.CharStorage(tx:size(1)+1) - local txp = torch.CharTensor(xp) - txp:narrow(1,1,tx:size(1)):copy(tx) - txp[tx:size(1)+1] = 0 - local f = torch.MemoryFile(xp) - f = f[mode](f) - local object = f:readObject() - f:close() - return object -end - -function torch.deserialize(str, mode) - local storage = torch.CharStorage():string(str) - return torch.deserializeFromStorage(storage, mode) -end - --- public API (saveobj/loadobj are safe for global import) -torch.saveobj = torch.save -torch.loadobj = torch.load diff --git a/contrib/lua-torch/torch7/Generator.c b/contrib/lua-torch/torch7/Generator.c deleted file mode 100644 index 8cf5ba66c2..0000000000 --- a/contrib/lua-torch/torch7/Generator.c +++ /dev/null @@ -1,50 +0,0 @@ -#include - -int torch_Generator_new(lua_State *L) -{ - THGenerator *gen = THGenerator_new(); - luaT_pushudata(L, gen, torch_Generator); - return 1; -} - -int torch_Generator_free(lua_State *L) -{ - THGenerator *gen= luaT_checkudata(L, 1, torch_Generator); - THGenerator_free(gen); - return 0; -} - -static int torch_Generator_write(lua_State *L) -{ - THGenerator *gen = luaT_checkudata(L, 1, torch_Generator); - THFile *file = luaT_checkudata(L, 2, "torch.File"); - - THFile_writeByteRaw(file, (unsigned char *)gen, sizeof(THGenerator)); - return 0; -} - -static int torch_Generator_read(lua_State *L) -{ - THGenerator *gen = luaT_checkudata(L, 1, torch_Generator); - THFile *file = luaT_checkudata(L, 2, "torch.File"); - - THFile_readByteRaw(file, (unsigned char *)gen, sizeof(THGenerator)); - return 0; -} - - -static const struct luaL_Reg torch_Generator_table_ [] = { - {"write", torch_Generator_write}, - {"read", torch_Generator_read}, - {NULL, NULL} -}; - -#define torch_Generator_factory torch_Generator_new - -void torch_Generator_init(lua_State *L) -{ - luaT_newmetatable(L, torch_Generator, NULL, - torch_Generator_new, torch_Generator_free, torch_Generator_factory); - luaT_setfuncs(L, torch_Generator_table_, 0); - lua_pop(L, 1); -} diff --git a/contrib/lua-torch/torch7/MemoryFile.c b/contrib/lua-torch/torch7/MemoryFile.c deleted file mode 100644 index a22dc17ec5..0000000000 --- a/contrib/lua-torch/torch7/MemoryFile.c +++ /dev/null @@ -1,70 +0,0 @@ -#include "general.h" - -static int torch_MemoryFile_new(lua_State *L) -{ - const char *mode; - THCharStorage *storage = luaT_toudata(L, 1, "torch.CharStorage"); - THFile *self; - - if(storage) - { - mode = luaL_optstring(L, 2, "rw"); - self = THMemoryFile_newWithStorage(storage, mode); - } - else - { - mode = luaL_optstring(L, 1, "rw"); - self = THMemoryFile_new(mode); - } - - luaT_pushudata(L, self, "torch.MemoryFile"); - return 1; -} - -static int torch_MemoryFile_storage(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.MemoryFile"); - THCharStorage_retain(THMemoryFile_storage(self)); - luaT_pushudata(L, THMemoryFile_storage(self), "torch.CharStorage"); - return 1; -} - -static int torch_longSize(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.MemoryFile"); - THMemoryFile_longSize(self, lua_tointeger(L, 2)); - lua_settop(L, 1); - return 1; -} - -static int 
torch_MemoryFile_free(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.MemoryFile"); - THFile_free(self); - return 0; -} - -static int torch_MemoryFile___tostring__(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.MemoryFile"); - lua_pushfstring(L, "torch.MemoryFile [status: %s -- mode: %c%c]", - (THFile_isOpened(self) ? "open" : "closed"), - (THFile_isReadable(self) ? 'r' : ' '), - (THFile_isWritable(self) ? 'w' : ' ')); - return 1; -} - -static const struct luaL_Reg torch_MemoryFile__ [] = { - {"storage", torch_MemoryFile_storage}, - {"longSize", torch_longSize}, - {"__tostring__", torch_MemoryFile___tostring__}, - {NULL, NULL} -}; - -void torch_MemoryFile_init(lua_State *L) -{ - luaT_newmetatable(L, "torch.MemoryFile", "torch.File", - torch_MemoryFile_new, torch_MemoryFile_free, NULL); - luaT_setfuncs(L, torch_MemoryFile__, 0); - lua_pop(L, 1); -} diff --git a/contrib/lua-torch/torch7/PipeFile.c b/contrib/lua-torch/torch7/PipeFile.c deleted file mode 100644 index a47c90d136..0000000000 --- a/contrib/lua-torch/torch7/PipeFile.c +++ /dev/null @@ -1,43 +0,0 @@ -#include "general.h" - -static int torch_PipeFile_new(lua_State *L) -{ - const char *name = luaL_checkstring(L, 1); - const char *mode = luaL_optstring(L, 2, "r"); - int isQuiet = luaT_optboolean(L, 3, 0); - THFile *self = THPipeFile_new(name, mode, isQuiet); - - luaT_pushudata(L, self, "torch.PipeFile"); - return 1; -} - -static int torch_PipeFile_free(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.PipeFile"); - THFile_free(self); - return 0; -} - -static int torch_PipeFile___tostring__(lua_State *L) -{ - THFile *self = luaT_checkudata(L, 1, "torch.PipeFile"); - lua_pushfstring(L, "torch.PipeFile on <%s> [status: %s -- mode: %c%c]", - THDiskFile_name(self), - (THFile_isOpened(self) ? "open" : "closed"), - (THFile_isReadable(self) ? 'r' : ' '), - (THFile_isWritable(self) ? 'w' : ' ')); - return 1; -} - -static const struct luaL_Reg torch_PipeFile__ [] = { - {"__tostring__", torch_PipeFile___tostring__}, - {NULL, NULL} -}; - -void torch_PipeFile_init(lua_State *L) -{ - luaT_newmetatable(L, "torch.PipeFile", "torch.DiskFile", - torch_PipeFile_new, torch_PipeFile_free, NULL); - luaT_setfuncs(L, torch_PipeFile__, 0); - lua_pop(L, 1); -} diff --git a/contrib/lua-torch/torch7/README.md b/contrib/lua-torch/torch7/README.md deleted file mode 100644 index 1a51c314ca..0000000000 --- a/contrib/lua-torch/torch7/README.md +++ /dev/null @@ -1,45 +0,0 @@ -[![Join the chat at https://gitter.im/torch/torch7](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/torch/torch7?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Build Status](https://travis-ci.org/torch/torch7.svg)](https://travis-ci.org/torch/torch7) - -## Need help? 
##
-
-* Questions, Support, Install issues: [Google groups](https://groups.google.com/forum/#!forum/torch7)
-* Reporting bugs: [torch7](https://github.com/torch/torch7/issues) [nn](https://github.com/torch/nn/issues) [cutorch](https://github.com/torch/cutorch/issues) [cunn](https://github.com/torch/cunn/issues) [optim](https://github.com/torch/optim/issues) [threads](https://github.com/torch/threads/issues)
-* Hanging out with other developers and users (strictly no install issues, no large blobs of text): [Gitter Chat](https://gitter.im/torch/torch7)
-
-
-# Torch Package Reference Manual #
-
-__Torch__ is the main package in [Torch7](http://torch.ch) where data
-structures for multi-dimensional tensors and mathematical operations
-over these are defined. Additionally, it provides many utilities for
-accessing files, serializing objects of arbitrary types and other
-useful utilities.
-
-## Torch Packages ##
-
- * Tensor Library
-   * [Tensor](doc/tensor.md) defines the _all powerful_ tensor object that provides multi-dimensional numerical arrays with type templating.
-   * [Mathematical operations](doc/maths.md) that are defined for the tensor object types.
-   * [Storage](doc/storage.md) defines a simple storage interface that controls the underlying storage for any tensor object.
- * File I/O Interface Library
-   * [File](doc/file.md) is an abstract interface for common file operations.
-   * [Disk File](doc/diskfile.md) defines operations on files stored on disk.
-   * [Memory File](doc/memoryfile.md) defines operations on files stored in RAM.
-   * [Pipe File](doc/pipefile.md) defines operations for using piped commands.
-   * [High-Level File operations](doc/serialization.md) defines higher-level serialization functions.
- * Useful Utilities
-   * [Timer](doc/timer.md) provides functionality for _measuring time_.
-   * [Tester](doc/tester.md) is a generic tester framework.
-   * [CmdLine](doc/cmdline.md) is a command-line argument parsing utility.
-   * [Random](doc/random.md) defines a random number generator package with various distributions.
-   * Finally, useful [utility](doc/utility.md) functions are provided for easy handling of torch tensor types and class inheritance.
-
-
-## Useful Links ##
-
- * [Community packages](https://github.com/torch/torch7/wiki/Cheatsheet)
- * [Torch Blog](http://torch.ch/blog/)
- * [Torch Slides](https://github.com/soumith/cvpr2015/blob/master/cvpr-torch.pdf)
-
diff --git a/contrib/lua-torch/torch7/ROADMAP.md b/contrib/lua-torch/torch7/ROADMAP.md
deleted file mode 100644
index 01bc8b8ac4..0000000000
--- a/contrib/lua-torch/torch7/ROADMAP.md
+++ /dev/null
@@ -1,144 +0,0 @@
-
-# Torch Roadmap (August 2015 - March 2016)
-
-This roadmap document is intended to serve as a loose plan of our vision for Torch in the short term.
-It is open to community feedback and contribution and only intends to serve as an initial draft.
-After community feedback, we shall freeze it and work on it.
-
-The roadmap focuses on five separate areas:
-
-- Core development: improving the core technically. Design changes, code refactors, and performance work go here.
-- Documentation and Accessibility: outlining the changes in documentation, and improving general user and developer documentation in various ways.
-- Versioning and Packaging: planned and much-needed changes to the packaging of Torch are discussed here.
-- Continuous Build Infrastructure: making our continuous builds more robust, introducing CUDA and OpenCL continuous builds, etc.
-- Other improvements
-
-
-## Torch Core Project Development
-
- - New class system:
-   - **[definite]** with no global side-effects (i.e. the class constructor should be scoped into its parent package).
-     Get rid of every statement/system that has a global effect on the environment (torch.setdefaulttensortype => dangerous and not clean).
-   - **[needs discussion]** fully serializable (i.e. when deserializing/reloading a model, there shouldn't be a need to load the libraries that originally defined the class, like nn; the class definition should be serialized as well: this would remove a lot of the backward-compatibility hacks that we currently have to add to class definitions).
-     - **koray**: I like this, but wouldn't it break backward compatibility?
-       Currently, whatever we serialize is just the data; the implementation is defined
-       at load time, so if a bug is fixed (or introduced) you use that.
-       And it starts being ambiguous: what if I load a layer from a file and
-       create a new one, and their implementations are inconsistent?
- - **[definite]** Get rid of non-tensor-related stuff (like serialization) in TH, and move it to the Lua side.
- - **[needs discussion]** OpenMP: should it stay or go? Is threads sufficient?
-   - **Ronan**: I really wonder about this one, especially now that I have been using threads intensively. I am not sure that fine-grained threading is necessary.
-   - **koray**: I guess you mean that with threading there is no need for OpenMP, but I disagree.
-     Our convolution layer will use multiple threads, and then if we run a ReLU over a huge state space, it would become embarrassingly slow.
-     We shouldn't expect everyone to run their experiments in a threading framework. It is more work than necessary sometimes.
- - **[needs discussion]** Templated C++ in TH core?
-   - **Ronan**: Should I clean up TH core? In the end, I am scared to move to C++, but some iterator functions taking a closure could be nice (I have some of those that I could add easily).
-     I could move to C++ if it were only templates + keeping pointers (and not C++11/14/17, because requiring the latest compilers would limit the number of users it can reach, etc.).
- - **[definite]** Migrate to a single, better/modern testing framework.
-   - **koray**: like some aspects of Totem, but it should be in the core Tester.
- - **[definite]** Benchmarking support in Tester.
- - **[definite]** Consistent testing scripts across all core projects.
- - **[definite]** 'nn' container: unified interface between containers and graphs.
- - **[mostly definite]** Switch to a batch-only assumption in 'nn'. Right now, the code is unnecessarily complicated by the stochastic/batch confusion, and we need extra functions like nInputDims and such.
- - **[needs discussion]** Support named arguments in the constructors of all 'nn' layers.
- - **[definite]** 'rnn' package.
-   - **Soumith**: Nicholas Leonard's seems to be a good one.
- - **[mostly definite]** argcheck for all core functions in torch. Get rid of cwrap's ugliness.
- - **[definite]** Improve paths to support more file system operations.
-   - **Clement**: could lfs and penlight be made more standard? penlight is a heavy package but provides so much utility.
-   - **Soumith**: I think penlight is lightweight and provides strong utility; definitely consider the dependency.
- - **[definite]** JIT/Lua/FFI/GC:
-   - **koray**: I think Torch should be agnostic to whatever the backend is;
-   - **clement**: yes!
-   - at this point, we need to have all core packages use the regular Lua API (almost the case).
-   - **Ronan**: agreed.
-
-- **[definite]** Plan to have a standalone FFI?
-  - Facebook released their PUC Lua-based FFI package, mostly improved by Sam Gross.
-  - [needs discussion] **Ronan** improves it a bit more to use Leon's C99 parser.
-  - **Koray**: I am not opposed to Leon's C99 parser, but we should not have the Qt-like situation where
-    it relies mostly on Leon to maintain it.
-    And we still need to have FFI, since there are people and packages that rely on it now.
-- **[definite]** Lua 5.2 migration (I think it's already finished ;) ).
-- **[mostly definite]** Lua 5.3 migration.
-- **[mostly definite]** Optionally replace GC with ref-counting (an existing version is in luajit-rocks, but it is completely broken and will need to be fixed).
-- **[needs discussion]** Make OpenCL support more visible under torch/opencl (**Soumith**: Hugh Perkins will maintain it of course ;) ).
-- **[definite]** Split nn into THNN and nn. THNN would be the NN package using TH as the backend, and nn would be the Lua layer. THNN can be used as a standalone C library. Same for cunn.
-- **[definite]** CUDA typed tensor support: CudaHalfTensor, CudaDoubleTensor, etc.
-- **[definite]** Better plotting support.
-- **[needs discussion]** A UI package that doesn't suck?
-  - **Ronan**: something based on cairo?
-  - **clement**: not sure if this would have much adoption.
-  - **Ronan**: yes, it is a worry. I started to do some fancy stuff there; it is not that hard.
-    However, I would need quite some time to polish it.
-    I think having something fully customizable from Lua really
-    makes a difference (rather than something like Qt, for example).
-  - Something based on a web client?
-    - **clement**: I like the idea of iTorch but could never easily build it; the build process is too big.
-    - **Ronan**: I cannot use something which forces me to use global variables.
-    - **koray**: I think at the end of the day, we need to have both a GUI client and a web-based client.
-      My main problem with web-based clients is that I can't easily create
-      custom displays, e.g. to play an animation: it is an offline process where I need to generate a movie and then load it in.
-      This and similar things make it hard to use for me.
-      Also, I agree; I actually could not install iTorch on my laptop
-      before the CVPR tutorial somehow, it did not want to work :).
-    - **soumith**: I think we should propose a common display API that any interface can implement;
-      that way the users don't need to change scripts across different UI backends.
-      Also, szym/display is a good candidate for the web UI; iTorch is indeed a bit of a pain to install.
-
-  - Should we endorse iTorch for everyone to use?
-    - **Ronan**: I know **Soumith** likes it, but I am not a big fan.
-      - Heavy + encourages the use of global variables. Excellent for tutorials, though.
-      - This ties to the first question in the **Other Questions** section.
-    - Can we/the community do pull requests on iTorch? (**Soumith**: yes)
-    - The first step would be to leanify the dependencies and/or install procedure (**Soumith**: agreed).
-- **[needs discussion]** How about Penlight? It has many crucial things that people use.
-  Should we endorse it, use some things from it? Replicate some things from penlight in torch?
-  - **clement**: upvoting this! we use it extensively.
-  - **Ronan**: I live better with fewer abstractions, but I can be convinced there.
-    However, I find penlight quite big.
-    There are things like the classes that I do not like as well (because of the approach they chose for creating classes).
-- **[needs discussion]** How about Moses? A new lean functional package that's pretty useful.
-- **[definite]** A style guide.
-  - Guidelines are super important:
-    - for Lua: at least impose strict camelCase + 3 spaces (no tabs)
-    - for C: camelCase + use of underscores to represent namespace scoping + 2 spaces
-
-## Documentation + Accessibility
-
- - Tutorials: provide guidelines and a basic framework/standard to write and publish tutorials?
- - Universal dataset API
-   - Dataset classes for several popular datasets
-   - high performance, thread support, etc.
-   - support for CPU and GPU
- - Model Zoo + training scripts; with training scripts we can highlight Torch's strengths
-   - How do we build a super-friendly model zoo? A git repo of pre-trained models?
- - Better documentation support; have a doc server
-   - Documentation for the TH/THC interface and design
- - Inline documentation parser
-   - doc/shell integration (maybe this is still working but needs redoing?)
-
-## Versioning + Packaging
- - Package owners need to start releasing frequent versions (e.g. torch v7.0.1, 7.0.2, ...)
- - scm packages should become deprecated
- - Packages need to avoid global side effects, and return themselves as simple tables (Lua 5.2 started enforcing this on the C side)
- - Provide standard AMI instances that people can launch (already loosely done by the community). We can load them with many standard + optional packages and/or provide a one-line option to update to the latest.
-
-## Build Infrastructure Requirements
- - Prepare a core distro release
- - Professional continuous builds for the distro and the individual core projects
- - Continuous build for GPU
- - continuous builds should include testing
- - The distro should be built and tested at every pull into any of the member projects
- - CI for Linux and OSX
-
-## Other Questions?
- - If there is a project that seems good from outside the consortium, how do we endorse/improve/modify it?
-   - do we put some technical criteria on that?
-   - being able to do pull requests?
-   - licensing?
-   - or maybe maintain a list of suggested packages?
-   - when does the existence of a package stop us from developing the same thing in core torch?
-   - **Soumith**: I think this should largely be community-driven and by popularity. The top starred or watched repos in the ecosystem would be a good start.
- diff --git a/contrib/lua-torch/torch7/Storage.c b/contrib/lua-torch/torch7/Storage.c deleted file mode 100644 index 730d974767..0000000000 --- a/contrib/lua-torch/torch7/Storage.c +++ /dev/null @@ -1,18 +0,0 @@ -#include "general.h" - -struct _rspamd_lua_text { - const char *start; - unsigned int len; - unsigned int flags; -}; - -#define torch_Storage_(NAME) TH_CONCAT_4(torch_,Real,Storage_,NAME) -#define THFile_readRealRaw TH_CONCAT_3(THFile_read, Real, Raw) -#define THFile_writeRealRaw TH_CONCAT_3(THFile_write, Real, Raw) -#define torch_Storage TH_CONCAT_STRING_3(torch.,Real,Storage) - -#include "generic/Storage.c" -#include "THGenerateAllTypes.h" - -#include "generic/Storage.c" -#include "THGenerateHalfType.h" diff --git a/contrib/lua-torch/torch7/Tensor.c b/contrib/lua-torch/torch7/Tensor.c deleted file mode 100644 index bf78d1aae1..0000000000 --- a/contrib/lua-torch/torch7/Tensor.c +++ /dev/null @@ -1,12 +0,0 @@ -#include "general.h" - -#define torch_Storage_(NAME) TH_CONCAT_4(torch_,Real,Storage_,NAME) -#define torch_Storage TH_CONCAT_STRING_3(torch.,Real,Storage) -#define torch_Tensor_(NAME) TH_CONCAT_4(torch_,Real,Tensor_,NAME) -#define torch_Tensor TH_CONCAT_STRING_3(torch.,Real,Tensor) - -#include "generic/Tensor.c" -#include "THGenerateAllTypes.h" - -#include "generic/Tensor.c" -#include "THGenerateHalfType.h" diff --git a/contrib/lua-torch/torch7/Tensor.lua b/contrib/lua-torch/torch7/Tensor.lua deleted file mode 100644 index 9a8215be1f..0000000000 --- a/contrib/lua-torch/torch7/Tensor.lua +++ /dev/null @@ -1,573 +0,0 @@ --- additional methods for Storage -local Storage = {} - --- additional methods for Tensor -local Tensor = {} - --- types -local types = {'Byte', 'Char', 'Short', 'Int', 'Long', 'Float', 'Half', 'Double'} - --- Lua 5.2 compatibility -local log10 = math.log10 or function(x) return math.log(x, 10) end - --- tostring() functions for Tensor and Storage -local function Storage__printformat(self) - if self:size() == 0 then - return "", nil, 0 - end - local intMode = true - local type = torch.typename(self) --- if type == 'torch.FloatStorage' or type == 'torch.DoubleStorage' then - for i=1,self:size() do - if self[i] ~= math.ceil(self[i]) then - intMode = false - break - end - end --- end - local tensor = torch.DoubleTensor(torch.DoubleStorage(self:size()):copy(self), 1, self:size()):abs() - local expMin = tensor:min() - if expMin ~= 0 then - expMin = math.floor(log10(expMin)) + 1 - else - expMin = 1 - end - local expMax = tensor:max() - if expMax ~= 0 then - expMax = math.floor(log10(expMax)) + 1 - else - expMax = 1 - end - - local format - local scale - local sz - if intMode then - if expMax > 9 then - format = "%11.4e" - sz = 11 - else - format = "%SZd" - sz = expMax + 1 - end - else - if expMax-expMin > 4 then - format = "%SZ.4e" - sz = 11 - if math.abs(expMax) > 99 or math.abs(expMin) > 99 then - sz = sz + 1 - end - else - if expMax > 5 or expMax < 0 then - format = "%SZ.4f" - sz = 7 - scale = math.pow(10, expMax-1) - else - format = "%SZ.4f" - if expMax == 0 then - sz = 7 - else - sz = expMax+6 - end - end - end - end - format = string.gsub(format, 'SZ', sz) - if scale == 1 then - scale = nil - end - return format, scale, sz -end - -function Storage.__tostring__(self) - local strt = {} - local format,scale = Storage__printformat(self) - if format:sub(2,4) == 'nan' then format = '%f' end - if scale then - table.insert(strt, string.format('%g', scale) .. ' *\n') - for i = 1,self:size() do - table.insert(strt, string.format(format, self[i]/scale) .. 
'\n') - end - else - for i = 1,self:size() do - table.insert(strt, string.format(format, self[i]) .. '\n') - end - end - table.insert(strt, '[' .. torch.typename(self) .. ' of size ' .. self:size() .. ']\n') - local str = table.concat(strt) - return str -end - -for _,type in ipairs(types) do - local metatable = torch.getmetatable('torch.' .. type .. 'Storage') - for funcname, func in pairs(Storage) do - rawset(metatable, funcname, func) - end -end - -local function Tensor__printMatrix(self, indent) - local format,scale,sz = Storage__printformat(self:storage()) - if format:sub(2,4) == 'nan' then format = '%f' end --- print('format = ' .. format) - scale = scale or 1 - indent = indent or '' - local strt = {indent} - local nColumnPerLine = math.floor((80-#indent)/(sz+1)) --- print('sz = ' .. sz .. ' and nColumnPerLine = ' .. nColumnPerLine) - local firstColumn = 1 - local lastColumn = -1 - while firstColumn <= self:size(2) do - if firstColumn + nColumnPerLine - 1 <= self:size(2) then - lastColumn = firstColumn + nColumnPerLine - 1 - else - lastColumn = self:size(2) - end - if nColumnPerLine < self:size(2) then - if firstColumn ~= 1 then - table.insert(strt, '\n') - end - table.insert(strt, 'Columns ' .. firstColumn .. ' to ' .. lastColumn .. '\n' .. indent) - end - if scale ~= 1 then - table.insert(strt, string.format('%g', scale) .. ' *\n ' .. indent) - end - for l=1,self:size(1) do - local row = self:select(1, l) - for c=firstColumn,lastColumn do - table.insert(strt, string.format(format, row[c]/scale)) - if c == lastColumn then - table.insert(strt, '\n') - if l~=self:size(1) then - if scale ~= 1 then - table.insert(strt, indent .. ' ') - else - table.insert(strt, indent) - end - end - else - table.insert(strt, ' ') - end - end - end - firstColumn = lastColumn + 1 - end - local str = table.concat(strt) - return str -end - -local function Tensor__printTensor(self) - local counter = torch.LongStorage(self:nDimension()-2) - local strt = {''} - local finished - counter:fill(1) - counter[1] = 0 - while true do - for i=1,self:nDimension()-2 do - counter[i] = counter[i] + 1 - if counter[i] > self:size(i) then - if i == self:nDimension()-2 then - finished = true - break - end - counter[i] = 1 - else - break - end - end - if finished then - break - end --- print(counter) - if #strt > 1 then - table.insert(strt, '\n') - end - table.insert(strt, '(') - local tensor = self - for i=1,self:nDimension()-2 do - tensor = tensor:select(1, counter[i]) - table.insert(strt, counter[i] .. ',') - end - table.insert(strt, '.,.) = \n') - table.insert(strt, Tensor__printMatrix(tensor, ' ')) - end - return table.concat(strt) -end - -function Tensor.__tostring__(self) - local strt = {''} - if self:nDimension() == 0 then - table.insert(strt, '[' .. torch.typename(self) .. ' with no dimension]\n') - else - local tensor = torch.DoubleTensor():resize(self:size()):copy(self) - if tensor:nDimension() == 1 then - local format,scale,sz = Storage__printformat(tensor:storage()) - if format:sub(2,4) == 'nan' then format = '%f' end - if scale then - table.insert(strt, string.format('%g', scale) .. ' *\n') - for i = 1,tensor:size(1) do - table.insert(strt, string.format(format, tensor[i]/scale) .. '\n') - end - else - for i = 1,tensor:size(1) do - table.insert(strt, string.format(format, tensor[i]) .. '\n') - end - end - table.insert(strt, '[' .. torch.typename(self) .. ' of size ' .. tensor:size(1) .. ']\n') - elseif tensor:nDimension() == 2 then - table.insert(strt, Tensor__printMatrix(tensor)) - table.insert(strt, '[' .. 
torch.typename(self) .. ' of size ' .. tensor:size(1) .. 'x' .. tensor:size(2) .. ']\n') - else - table.insert(strt, Tensor__printTensor(tensor)) - table.insert(strt, '[' .. torch.typename(self) .. ' of size ') - for i=1,tensor:nDimension() do - table.insert(strt, tensor:size(i)) - if i ~= tensor:nDimension() then - table.insert(strt, 'x') - end - end - table.insert(strt, ']\n') - end - end - return table.concat(strt) -end - -function Tensor.type(self,type) - local current = torch.typename(self) - if not type then return current end - if type ~= current then - local new = torch.getmetatable(type).new() - if self:nElement() > 0 then - new:resize(self:size()):copy(self) - end - return new - else - return self - end -end - -function Tensor.typeAs(self,tensor) - return self:type(tensor:type()) -end - -function Tensor.byte(self) - return self:type('torch.ByteTensor') -end - -function Tensor.char(self) - return self:type('torch.CharTensor') -end - -function Tensor.short(self) - return self:type('torch.ShortTensor') -end - -function Tensor.int(self) - return self:type('torch.IntTensor') -end - -function Tensor.long(self) - return self:type('torch.LongTensor') -end - -function Tensor.float(self) - return self:type('torch.FloatTensor') -end - -function Tensor.double(self) - return self:type('torch.DoubleTensor') -end - -function Tensor.half(self) - return self:type('torch.HalfTensor') -end - -function Tensor.real(self) - return self:type(torch.getdefaulttensortype()) -end - -function Tensor.expand(result,tensor,...) - -- get sizes - local sizes = {...} - - local t = torch.type(tensor) - if (t == 'number' or t == 'torch.LongStorage') then - table.insert(sizes,1,tensor) - tensor = result - result = tensor.new() - end - - -- check type - local size - if torch.type(sizes[1])=='torch.LongStorage' then - size = sizes[1] - else - size = torch.LongStorage(#sizes) - for i,s in ipairs(sizes) do - size[i] = s - end - end - - -- get dimensions - local tensor_dim = tensor:dim() - local tensor_stride = tensor:stride() - local tensor_size = tensor:size() - - -- check nb of dimensions - if #size ~= tensor:dim() then - error('the number of dimensions provided must equal tensor:dim()') - end - - -- create a new geometry for tensor: - for i = 1,tensor_dim do - if tensor_size[i] == 1 then - tensor_size[i] = size[i] - tensor_stride[i] = 0 - elseif tensor_size[i] ~= size[i] then - error('incorrect size: only supporting singleton expansion (size=1)') - end - end - - -- create new view, with singleton expansion: - result:set(tensor:storage(), tensor:storageOffset(), - tensor_size, tensor_stride) - return result -end -torch.expand = Tensor.expand - -function Tensor.expandAs(result,tensor,template) - if template then - return result:expand(tensor,template:size()) - end - return result:expand(tensor:size()) -end -torch.expandAs = Tensor.expandAs - -function Tensor.repeatTensor(result,tensor,...) 
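-   -- Hedged usage sketch (x is illustrative, not part of this file):
-   -- repeatTensor tiles the tensor the given number of times per dimension,
-   -- e.g.
-   --   local x = torch.Tensor{1, 2}
-   --   torch.repeatTensor(x, 3, 2) -- returns a 3x4 tensor; each row is {1, 2, 1, 2}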
- -- get sizes - local sizes = {...} - - local t = torch.type(tensor) - if (t == 'number' or t == 'torch.LongStorage') then - table.insert(sizes,1,tensor) - tensor = result - result = tensor.new() - end - -- if not contiguous, then force the tensor to be contiguous - if not tensor:isContiguous() then tensor = tensor:clone() end - - -- check type - local size - if torch.type(sizes[1])=='torch.LongStorage' then - size = sizes[1] - else - size = torch.LongStorage(#sizes) - for i,s in ipairs(sizes) do - size[i] = s - end - end - if size:size() < tensor:dim() then - error('Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor') - end - local xtensor = tensor.new():set(tensor) - local xsize = xtensor:size():totable() - for i=1,size:size()-tensor:dim() do - table.insert(xsize,1,1) - end - size = torch.DoubleTensor(xsize):cmul(torch.DoubleTensor(size:totable())):long():storage() - xtensor:resize(torch.LongStorage(xsize)) - result:resize(size) - local urtensor = result.new(result) - for i=1,xtensor:dim() do - urtensor = urtensor:unfold(i,xtensor:size(i),xtensor:size(i)) - end - for i=1,urtensor:dim()-xtensor:dim() do - table.insert(xsize,1,1) - end - xtensor:resize(torch.LongStorage(xsize)) - local xxtensor = xtensor:expandAs(urtensor) - urtensor:copy(xxtensor) - return result -end -torch.repeatTensor = Tensor.repeatTensor - ---- One of the size elements can be -1, - --- a new LongStorage is then returned. - --- The length of the unspecified dimension - --- is inferred from the number of remaining elements. -local function specifyFully(size, nElements) - local nCoveredElements = 1 - local remainingDim = nil - local sizes = size:totable() - for i = 1, #sizes do - local wantedDimSize = sizes[i] - if wantedDimSize == -1 then - if remainingDim then - error("Only one of torch.view dimensions can be -1.") - end - remainingDim = i - else - nCoveredElements = nCoveredElements * wantedDimSize - end - end - - if not remainingDim then - return size - end - - assert(nElements % nCoveredElements == 0, "The number of covered elements is not a multiple of all elements.") - local copy = torch.LongStorage(sizes) - copy[remainingDim] = nElements / nCoveredElements - return copy -end - --- TODO : This should be implemented in TH and and wrapped. -function Tensor.view(result, src, ...) - local size = ... 
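-   -- Hedged usage sketch (x and y are illustrative): view reshapes a
-   -- contiguous tensor without copying, and one dimension may be -1,
-   -- in which case it is inferred from the total number of elements:
-   --   local x = torch.range(1, 6) -- 6 elements
-   --   local y = x:view(2, -1)     -- a 2x3 view sharing x's storage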
- local view, tensor - local function istensor(tensor) - return torch.typename(tensor) and torch.typename(tensor):find('torch.*Tensor') - end - local function isstorage(storage) - return torch.typename(storage) and torch.typename(storage) == 'torch.LongStorage' - end - if istensor(result) and istensor(src) and type(size) == 'number' then - size = torch.LongStorage{...} - view = result - tensor = src - elseif istensor(result) and istensor(src) and isstorage(size) then - size = size - view = result - tensor = src - elseif istensor(result) and isstorage(src) and size == nil then - size = src - tensor = result - view = tensor.new() - elseif istensor(result) and type(src) == 'number' then - size = {...} - table.insert(size,1,src) - size = torch.LongStorage(size) - tensor = result - view = tensor.new() - else - local t1 = 'torch.Tensor, torch.Tensor, number [, number ]*' - local t2 = 'torch.Tensor, torch.Tensor, torch.LongStorage' - local t3 = 'torch.Tensor, torch.LongStorage' - local t4 = 'torch.Tensor, number [, number ]*' - error(string.format('torch.view, expected (%s) or\n (%s) or\n (%s)\n or (%s)', t1, t2, t3, t4)) - end - local origNElement = tensor:nElement() - size = specifyFully(size, origNElement) - - assert(tensor:isContiguous(), "expecting a contiguous tensor") - view:set(tensor:storage(), tensor:storageOffset(), size) - if view:nElement() ~= origNElement then - local inputSize = table.concat(tensor:size():totable(), "x") - local outputSize = table.concat(size:totable(), "x") - error(string.format("Wrong size for view. Input size: %s. Output size: %s", - inputSize, outputSize)) - end - return view -end -torch.view = Tensor.view - -function Tensor.viewAs(result, src, template) - if template and torch.typename(template) then - return result:view(src, template:size()) - elseif template == nil then - template = src - src = result - result = src.new() - return result:view(src, template:size()) - else - local t1 = 'torch.Tensor, torch.Tensor, torch.LongStorage' - local t2 = 'torch.Tensor, torch.LongStorage' - error(string.format('expecting (%s) or (%s)', t1, t2)) - end -end -torch.viewAs = Tensor.viewAs - -function Tensor.split(result, tensor, splitSize, dim) - if torch.type(result) ~= 'table' then - dim = splitSize - splitSize = tensor - tensor = result - result = {} - else - -- empty existing result table before using it - for k,v in pairs(result) do - result[k] = nil - end - end - dim = dim or 1 - local start = 1 - while start <= tensor:size(dim) do - local size = math.min(splitSize, tensor:size(dim) - start + 1) - local split = tensor:narrow(dim, start, size) - table.insert(result, split) - start = start + size - end - return result -end -torch.split = Tensor.split - -function Tensor.chunk(result, tensor, nChunk, dim) - if torch.type(result) ~= 'table' then - dim = nChunk - nChunk = tensor - tensor = result - result = {} - end - dim = dim or 1 - local splitSize = math.ceil(tensor:size(dim)/nChunk) - return torch.split(result, tensor, splitSize, dim) -end -torch.chunk = Tensor.chunk - -function Tensor.totable(tensor) - local result = {} - local dim = tensor:dim() - if dim == 1 then - tensor:apply(function(i) table.insert(result, i) end) - elseif dim > 0 then - for i = 1, tensor:size(1) do - table.insert(result, tensor[i]:totable()) - end - end - return result -end -torch.totable = Tensor.totable - -function Tensor.permute(tensor, ...) 
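-   -- Hedged usage sketch (t is illustrative): permute reorders all
-   -- dimensions at once through successive transpositions, e.g. for a
-   -- 2x3x4 tensor t:
-   --   t:permute(3, 1, 2) -- returns a 4x2x3 view of t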
- local perm = {...} - local nDims = tensor:dim() - assert(#perm == nDims, 'Invalid permutation') - local j - for i, p in ipairs(perm) do - if p ~= i and p ~= 0 then - j = i - repeat - assert(0 < perm[j] and perm[j] <= nDims, 'Invalid permutation') - tensor = tensor:transpose(j, perm[j]) - j, perm[j] = perm[j], 0 - until perm[j] == i - perm[j] = j - end - end - return tensor -end -torch.permute = Tensor.permute - -for _,type in ipairs(types) do - local metatable = torch.getmetatable('torch.' .. type .. 'Tensor') - for funcname, func in pairs(Tensor) do - if funcname ~= 'totable' or type ~='Half' then - rawset(metatable, funcname, func) - else - local function Tensor__totable(self) - local host_tensor = self:float() - return self:float():totable() - end - rawset(torch.getmetatable('torch.HalfTensor'), 'totable', Tensor__totable) - end - end -end diff --git a/contrib/lua-torch/torch7/TensorMath.c b/contrib/lua-torch/torch7/TensorMath.c deleted file mode 100644 index 84a8218123..0000000000 --- a/contrib/lua-torch/torch7/TensorMath.c +++ /dev/null @@ -1,117889 +0,0 @@ -#include "TH.h" -#include "THMath.h" -#include "luaT.h" -#include "utils.h" - -static const void* torch_istensortype(lua_State *L, const char *tname) -{ - if(!tname) - return NULL; - - if(!luaT_pushmetatable(L, tname)) - return NULL; - - lua_pushstring(L, "torch"); - lua_rawget(L, -2); - if(lua_istable(L, -1)) - return tname; - else - { - lua_pop(L, 2); - return NULL; - } - - return NULL; -} - -static int torch_isnonemptytable(lua_State *L, int idx) -{ - int empty; - if (!lua_istable(L, idx)) return 0; - - lua_rawgeti(L, idx, 1); - empty = lua_isnil(L, -1); - lua_pop(L, 1); - return !empty; -} - -static const void* torch_istensorarray(lua_State *L, int idx) -{ - const char* tname; - int tensor_idx; - if (!torch_isnonemptytable(L, idx)) return 0; - - lua_checkstack(L, 3); - lua_rawgeti(L, idx, 1); - tensor_idx = lua_gettop(L); - tname = (torch_istensortype(L, luaT_typename(L, -1))); - lua_remove(L, tensor_idx); - return tname; -} - -/* WARNING: autogenerated file */ - -#ifndef _CWRAP_STR_ARG_TYPES_4821726c1947cdf3eebacade98173939 -#define _CWRAP_STR_ARG_TYPES_4821726c1947cdf3eebacade98173939 -#include "string.h" -static void str_arg_types(lua_State *L, char *buf, int n) { - int i; - int nargs = lua_gettop(L); - if (nargs == 0) { - snprintf(buf, n, "no arguments provided"); - return; - } - for (i = 1; i <= nargs; i++) { - int l; - const char *torch_type = luaT_typename(L, i); - if(torch_type && !strncmp(torch_type, "torch.", 6)) torch_type += 6; - if (torch_type) l = snprintf(buf, n, "%s ", torch_type); - else if(lua_isnil(L, i)) l = snprintf(buf, n, "%s ", "nil"); - else if(lua_isboolean(L, i)) l = snprintf(buf, n, "%s ", "boolean"); - else if(lua_isnumber(L, i)) l = snprintf(buf, n, "%s ", "number"); - else if(lua_isstring(L, i)) l = snprintf(buf, n, "%s ", "string"); - else if(lua_istable(L, i)) l = snprintf(buf, n, "%s ", "table"); - else if(lua_isuserdata(L, i)) l = snprintf(buf, n, "%s ", "userdata"); - else l = snprintf(buf, n, "%s ", "???"); - if (l >= n) return; - buf += l; - n -= l; - } -} -#endif -static int torch_ByteTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_zero(arg1); -return 
1; -} - -static int torch_zero(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "zero"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.zero() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_fill(arg1,arg2); -return 1; -} - -static int torch_fill(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "fill"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.fill() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THByteTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_zeros(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "zeros"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.zeros() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THByteTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_ones(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? 
*/ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "ones"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.ones() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int torch_reshape(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "reshape"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.reshape() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THByteTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_gather(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "gather"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.gather() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THByteTensor *arg4 = NULL; -THByteTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -unsigned char arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* index LongTensor ByteTensor | *ByteTensor* index LongTensor unsigned char", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THByteTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THByteTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_scatter(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "scatter"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.scatter() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -THByteTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor ByteTensor", type_buf); -} -arg3 = THByteTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int torch_dot(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "dot"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.dot() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -THByteTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor ByteTensor", type_buf); -} -arg3 = THByteTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int torch_equal(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "equal"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.equal() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 1; -THByteTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 2; -arg4 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 2; -arg6 = (unsigned char)lua_tonumber(L, 2); -arg4 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | [*ByteTensor*] ByteTensor [unsigned char] ByteTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ByteTensor"); -THByteTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_add(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "add"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.add() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 1; -THByteTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 2; -arg4 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 2; -arg6 = (unsigned char)lua_tonumber(L, 2); -arg4 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | [*ByteTensor*] ByteTensor [unsigned char] ByteTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ByteTensor"); -THByteTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_csub(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "csub"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.csub() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int torch_mul(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "mul"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.mul() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_div(arg1,arg2,arg3); -return 1; -} - -static int torch_div(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "div"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.div() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int torch_lshift(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "lshift"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.lshift() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int torch_rshift(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "rshift"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.rshift() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_fmod(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "fmod"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.fmod() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int torch_remainder(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "remainder"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.remainder() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int torch_bitand(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "bitand"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.bitand() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int torch_bitor(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "bitor"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.bitor() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_bitxor(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "bitxor"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.bitxor() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_mod(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "mod"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.mod() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -unsigned char arg4 = 0; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg4 = (unsigned char)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -arg4 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char unsigned char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_clamp(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "clamp"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.clamp() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (unsigned char)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor [unsigned char]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_match(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "match"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.match() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int torch_cmul(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cmul"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cmul() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int torch_cpow(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? 
*/ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cpow"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cpow() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int torch_cdiv(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cdiv"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cdiv() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int torch_clshift(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "clshift"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.clshift() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int torch_crshift(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "crshift"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.crshift() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_cfmod(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cfmod"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cfmod() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int torch_cremainder(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cremainder"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cremainder() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int torch_cbitand(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cbitand"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cbitand() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int torch_cbitor(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cbitor"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cbitor() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_cbitxor(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cbitxor"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cbitxor() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_cmod(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cmod"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cmod() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 1; -THByteTensor *arg4 = NULL; -THByteTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [unsigned char] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_addcmul(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "addcmul"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.addcmul() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 1; -THByteTensor *arg4 = NULL; -THByteTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [unsigned char] ByteTensor ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_addcdiv(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "addcdiv"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.addcdiv() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THByteTensor_new(); -THByteTensor_resize1d(arg1, arg5->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~2D ByteTensor~1D", type_buf); -} -THByteTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_mv(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "mv"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.mv() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THByteTensor_new(); -THByteTensor_resize2d(arg1, arg5->size[0], arg6->size[1]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~2D ByteTensor~2D", type_buf); -} -THByteTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_mm(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "mm"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.mm() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THByteTensor_new(); -THByteTensor_resize3d(arg1, arg5->size[0], arg5->size[1], arg6->size[2]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~3D ByteTensor~3D", type_buf); -} -THByteTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_bmm(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "bmm"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.bmm() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THByteTensor_new(); -THByteTensor_resize2d(arg1, arg5->size[0], arg6->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~1D ByteTensor~1D", type_buf); -} -THByteTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_ger(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "ger"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.ger() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_addmv(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (unsigned char)lua_tonumber(L, 1); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg4 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (unsigned char)lua_tonumber(L, 1); -arg4 = (unsigned char)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 4) 
-&& (arg5 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 6, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -arg4 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [unsigned char] ByteTensor~1D [unsigned char] ByteTensor~2D ByteTensor~1D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_addmv(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "addmv"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.addmv() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_addmm(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -arg2 = (unsigned char)lua_tonumber(L, 1); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, 
"torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -arg4 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -arg2 = (unsigned char)lua_tonumber(L, 1); -arg4 = (unsigned char)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 6, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -arg4 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [unsigned char] ByteTensor~2D [unsigned char] ByteTensor~2D ByteTensor~2D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_addmm(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "addmm"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.addmm() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (unsigned char)lua_tonumber(L, 1); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg4 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (unsigned char)lua_tonumber(L, 1); -arg4 = (unsigned char)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 
4) -&& (arg5 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 6, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -arg4 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [unsigned char] ByteTensor~2D [unsigned char] ByteTensor~1D ByteTensor~1D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_addr(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "addr"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.addr() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_addbmm(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (unsigned char)lua_tonumber(L, 1); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, 
"torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg4 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (unsigned char)lua_tonumber(L, 1); -arg4 = (unsigned char)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 6, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -arg4 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [unsigned char] ByteTensor~2D [unsigned char] ByteTensor~3D ByteTensor~3D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_addbmm(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "addbmm"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.addbmm() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_baddbmm(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (unsigned char)lua_tonumber(L, 1); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg4 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (unsigned char)lua_tonumber(L, 1); -arg4 = (unsigned char)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg3->nDimension == 3) -&& 
lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 6, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -arg4 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [unsigned char] ByteTensor~3D [unsigned char] ByteTensor~3D ByteTensor~3D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_baddbmm(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "baddbmm"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.baddbmm() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_numel(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -ptrdiff_t arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor", type_buf); -} -arg2 = THByteTensor_numel(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int torch_numel(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "numel"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.numel() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cumsum(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cumsum(arg1,arg2,arg3); -return 1; -} - -static int torch_cumsum(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cumsum"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cumsum() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cumprod(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cumprod(arg1,arg2,arg3); -return 1; -} - -static int torch_cumprod(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cumprod"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cumprod() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_sum(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -long arg2 = 0; -THByteTensor *arg3 = NULL; -int arg3_idx = 0; -THByteTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor | [*ByteTensor*] ByteTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THByteTensor_sumall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ByteTensor"); -THByteTensor_sum(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_sum(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "sum"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.sum() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_prod(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -long arg2 = 0; -THByteTensor *arg3 = NULL; -int arg3_idx = 0; -THByteTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor | [*ByteTensor*] ByteTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THByteTensor_prodall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ByteTensor"); -THByteTensor_prod(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_prod(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "prod"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.prod() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_min(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -unsigned char arg2 = 0; -THByteTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THByteTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor | [*ByteTensor*] [*LongTensor*] ByteTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THByteTensor_minall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ByteTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THByteTensor_min(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int torch_min(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "min"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.min() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_max(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -unsigned char arg2 = 0; -THByteTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THByteTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor | [*ByteTensor*] [*LongTensor*] ByteTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THByteTensor_maxall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ByteTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THByteTensor_max(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int torch_max(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "max"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.max() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cmin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (unsigned char)lua_tonumber(L, 2); -arg4 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor | [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cmin(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ByteTensor"); -THByteTensor_cminValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_cmin(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cmin"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cmin() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cmax(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (unsigned char)lua_tonumber(L, 2); -arg4 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor | [*ByteTensor*] ByteTensor unsigned char", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cmax(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ByteTensor"); -THByteTensor_cmaxValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_cmax(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cmax"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cmax() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_trace(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -long arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor", type_buf); -} -arg2 = THByteTensor_trace(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int torch_trace(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "trace"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.trace() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cross(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -long arg4 = -1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cross(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_cross(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 
&& (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cross"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cross() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_diag(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_diag(arg1,arg2,arg3); -return 1; -} - -static int torch_diag(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "diag"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.diag() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_eye(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] long [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_eye(arg1,arg2,arg3); -return 1; -} - -static int torch_eye(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "eye"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.eye() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_range(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -long arg4 = 1; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] long long [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_range(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_range(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "range"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.range() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_randperm(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -arg3 = (long)lua_tonumber(L, 1); -arg1 = THByteTensor_new(); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [Generator] long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_randperm(arg1,arg2,arg3); - -THByteTensor_add(arg1, arg1, 1); -return 1; -} - -static int torch_randperm(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "randperm"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.randperm() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_sort(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THByteTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg2_idx = 1; -arg1 = THByteTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isboolean(L, 2) -) -{ -arg5 = lua_toboolean(L, 2); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg5 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg5 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isboolean(L, 4) -) -{ 
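/* [annotation, not in the original file] In this branch the caller
   supplies both destination tensors (sorted values and indices),
   argument 4 is read as the ascending/descending flag (arg5), and the
   sort dimension (arg4) defaults to the last dimension, i.e.
   nDimension-1 in 0-based C terms. */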
-arg1_idx = 1; -arg2_idx = 2; -arg5 = lua_toboolean(L, 4); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg5 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [*LongTensor*] ByteTensor [index] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THByteTensor_sort(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_sort(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "sort"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.sort() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_topk(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THByteTensor *arg3 = NULL; -long arg4 = 1; -long arg5 = 0; -int arg6 = 0; -int arg7 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg2_idx = 1; -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = 
luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx 
= 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg1 = THByteTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isboolean(L, 2) -) -{ -arg7 = lua_toboolean(L, 2); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg7 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg7 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = 
luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg7 = lua_toboolean(L, 4); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg7 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 
4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THByteTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} 
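/* [annotation, not in the original file] These cwrap-style wrappers
   match branches top to bottom, so overloads sharing a Lua signature
   cannot all fire: the earlier (ByteTensor, number) cases read the
   number as k, leaving the later same-shaped dim-only readings
   unreachable dead code. Dimension arguments are shifted by -1 for
   Lua's 1-based indexing, and the returned LongTensor of indices is
   shifted back with THLongTensor_add(arg2, arg2, 1) before returning.
   The companion torch_topk() dispatcher further below only resolves a
   tensor type and forwards to this function through the type's method
   table.

   Hypothetical usage (Lua, assuming a torch7 runtime):
     local x = torch.ByteTensor(4, 5):random(0, 255)
     local v, i = torch.topk(x, 3)            -- 3 smallest along last dim
     local v, i = torch.topk(x, 3, 2, true)   -- 3 largest along dim 2
*/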
-else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THByteTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -} -else if(narg == 5 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg1 = THByteTensor_new(); -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -&& lua_isboolean(L, 7) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -arg7 = lua_toboolean(L, 7); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [*LongTensor*] ByteTensor [long] [index] [boolean] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THByteTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_topk(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? 
*/ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "topk"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.topk() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_kthvalue(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THByteTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [*LongTensor*] ByteTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else 
-luaT_pushudata(L, arg2, "torch.LongTensor"); -THByteTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_kthvalue(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "kthvalue"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.kthvalue() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_mode(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THByteTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg2_idx = 1; -arg1 = THByteTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [*LongTensor*] ByteTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, 
"torch.ByteTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THByteTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_mode(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "mode"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.mode() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THByteTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg2_idx = 1; -arg1 = THByteTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [*LongTensor*] ByteTensor [index]", type_buf); -} -if(arg1_idx) 
-lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THByteTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_median(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "median"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.median() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_tril(arg1,arg2,arg3); -return 1; -} - -static int torch_tril(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "tril"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.tril() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_triu(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_triu(arg1,arg2,arg3); -return 1; -} - -static int torch_triu(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "triu"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.triu() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_cat(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -long arg4 = -2; -THByteTensor *arg5 = NULL; -int arg5_idx = 0; -THByteTensor **arg6_data = NULL; -long arg6_size = 0; -int arg6_i = 0; -long arg7 = -2; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 1 -&& torch_isnonemptytable(L, 1) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THByteTensor**)THAlloc(arg6_size * sizeof(THByteTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ByteTensor"))) - luaL_error(L, "expected ByteTensor in tensor array"); - lua_pop(L, 1); -} - -arg5 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THByteTensor**)THAlloc(arg6_size * sizeof(THByteTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ByteTensor"))) - luaL_error(L, "expected ByteTensor in tensor array"); - lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THByteTensor**)THAlloc(arg6_size * sizeof(THByteTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ByteTensor"))) - luaL_error(L, "expected ByteTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ 
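/* [annotation, not in the original file] Destination tensor plus a Lua
   table of tensors plus a dimension. The do/while below measures the
   array part of the table by calling lua_rawgeti until it hits nil,
   then fills a THAlloc'ed THByteTensor* array by popping the pushed
   values in reverse; THByteTensor_catArray consumes the array and
   THFree releases it afterwards. */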
-argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THByteTensor**)THAlloc(arg6_size * sizeof(THByteTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ByteTensor"))) - luaL_error(L, "expected ByteTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor [index] | [*ByteTensor*] {ByteTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.ByteTensor"); -THByteTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static int torch_cat(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cat"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cat() function", tname); - - return lua_gettop(L); -} - -static long THRandom_random2__(THGenerator *gen, long a, long b) -{ - THArgCheck(b >= a, 2, "upper bound must be larger than lower bound"); - return((THRandom_random(gen) % (b+1-a)) + a); -} - -static long THRandom_random1__(THGenerator *gen, long b) -{ - THArgCheck(b > 0, 1, "upper bound must be strictly positive"); - return(THRandom_random(gen) % b + 1); -} - -static void THByteTensor_random2__(THByteTensor *self, THGenerator *gen, long a, long b) -{ - THArgCheck(b >= a, 2, "upper bound must be larger than lower bound"); - TH_TENSOR_APPLY(unsigned char, self, *self_data = ((THRandom_random(gen) % (b+1-a)) + a);) -} - -static void THByteTensor_random1__(THByteTensor *self, THGenerator *gen, long b) -{ - THArgCheck(b > 0, 1, "upper bound must be strictly positive"); - TH_TENSOR_APPLY(unsigned char, self, *self_data = (THRandom_random(gen) % b + 1);) -} - -static int torch_ByteTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THByteTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; 
-THByteTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *ByteTensor* [Generator] long long | *ByteTensor* [Generator] long | *ByteTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THByteTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); 
-THByteTensor_random(arg17,arg18);
-return 1;
-}
-return 0;
-}
-
-static int torch_random(lua_State *L)
-{
- int narg = lua_gettop(L);
- const void *tname;
- if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
- {
- }
- else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
- {
- }
- else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
- {
- }
- else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
- && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
- {
- lua_remove(L, -2);
- }
- else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
- luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
-
- lua_pushstring(L, "random");
- lua_rawget(L, -2);
- if(lua_isfunction(L, -1))
- {
- lua_insert(L, 1);
- lua_pop(L, 2); /* the two tables we put on the stack above */
- lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
- }
- else
- return luaL_error(L, "%s does not implement the torch.random() function", tname);
-
- return lua_gettop(L);
-}
-
-static int torch_ByteTensor_geometric(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THGenerator *arg1 = NULL;
-double arg2 = 0;
-double arg3 = 0;
-THByteTensor *arg4 = NULL;
-int arg4_idx = 0;
-THGenerator *arg5 = NULL;
-double arg6 = 0;
-if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 1;
-arg2 = (double)lua_tonumber(L, 1);
-lua_getglobal(L,"torch");
-arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, torch_Generator))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 1;
-arg2 = (double)lua_tonumber(L, 2);
-}
-else if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (double)lua_tonumber(L, 2);
-lua_getglobal(L,"torch");
-arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg5 = luaT_toudata(L, 2, torch_Generator))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (double)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *ByteTensor* [Generator] double", type_buf);
-}
-if(argset == 1)
-{
-arg3 = THRandom_geometric(arg1,arg2);
-lua_pushnumber(L, (lua_Number)arg3);
-return 1;
-}
-else if(argset == 2)
-{
-lua_pushvalue(L, arg4_idx);
-THByteTensor_geometric(arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_geometric(lua_State *L)
-{
- int narg = lua_gettop(L);
- const void *tname;
- if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
- {
- }
- else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
- {
- }
- else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
- {
- }
- else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
- && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
- {
- lua_remove(L, -2);
- }
- else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
- luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
-
- lua_pushstring(L, "geometric");
- lua_rawget(L, -2);
- if(lua_isfunction(L, -1))
- {
- lua_insert(L, 1);
- lua_pop(L, 2); /* the two tables we put on the stack above */
- lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
- }
- else
- return luaL_error(L, "%s does not implement the torch.geometric() function", tname);
-
- return lua_gettop(L);
-}
-
-static int torch_ByteTensor_bernoulli(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THGenerator *arg1 = NULL;
-double arg2 = 0.5;
-double arg3 = 0;
-THByteTensor *arg4 = NULL;
-int arg4_idx = 0;
-THGenerator *arg5 = NULL;
-double arg6 = 0.5;
-THByteTensor *arg7 = NULL;
-int arg7_idx = 0;
-THGenerator *arg8 = NULL;
-THFloatTensor *arg9 = NULL;
-THByteTensor *arg10 = NULL;
-int arg10_idx = 0;
-THGenerator *arg11 = NULL;
-THDoubleTensor *arg12 = NULL;
-if(narg == 0
-)
-{
-argset = 1;
-lua_getglobal(L,"torch");
-arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, torch_Generator))
-)
-{
-argset = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 1;
-arg2 = (double)lua_tonumber(L, 1);
-lua_getglobal(L,"torch");
-arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, torch_Generator))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 1;
-arg2 = (double)lua_tonumber(L, 2);
-}
-else if(narg == 1
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor"))
-)
-{
-argset = 2;
-arg4_idx = 1;
-lua_getglobal(L,"torch");
-arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg5 = luaT_toudata(L, 2, torch_Generator))
-)
-{
-argset = 2;
-arg4_idx = 1;
-}
-else if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (double)lua_tonumber(L, 2);
-lua_getglobal(L,"torch");
-arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg5 = luaT_toudata(L, 2, torch_Generator))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (double)lua_tonumber(L, 3);
-}
-else if(narg == 2
-&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor"))
-)
-{
-argset = 3;
-arg7_idx = 1;
-lua_getglobal(L,"torch");
-arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 3
-&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg8 = luaT_toudata(L, 2, torch_Generator))
-&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor"))
-)
-{
-argset = 3;
-arg7_idx = 1;
-}
-else if(narg == 2
-&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 4;
-arg10_idx = 1;
-lua_getglobal(L,"torch");
-arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 3
-&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg11 = luaT_toudata(L, 2, torch_Generator))
-&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-)
-{
-argset = 4;
-arg10_idx = 1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *ByteTensor* [Generator] [double] | *ByteTensor* [Generator] FloatTensor | *ByteTensor* [Generator] DoubleTensor", type_buf);
-}
-if(argset == 1)
-{
-arg3 = THRandom_bernoulli(arg1,arg2);
-lua_pushnumber(L, (lua_Number)arg3);
-return 1;
-}
-else if(argset == 2)
-{
-lua_pushvalue(L, arg4_idx);
-THByteTensor_bernoulli(arg4,arg5,arg6);
-return 1;
-}
-else if(argset == 3)
-{
-lua_pushvalue(L, arg7_idx);
-THByteTensor_bernoulli_FloatTensor(arg7,arg8,arg9);
-return 1;
-}
-else if(argset == 4)
-{
-lua_pushvalue(L, arg10_idx);
-THByteTensor_bernoulli_DoubleTensor(arg10,arg11,arg12);
-return 1;
-}
-return 0;
-}
-
-static int torch_bernoulli(lua_State *L)
-{
- int narg = lua_gettop(L);
- const void *tname;
- if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
- {
- }
- else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
- {
- }
- else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
- {
- }
- else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
- && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
- {
- lua_remove(L, -2);
- }
- else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
- luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
-
- lua_pushstring(L, "bernoulli");
- lua_rawget(L, -2);
- if(lua_isfunction(L, -1))
- {
- lua_insert(L, 1);
- lua_pop(L, 2); /* the two tables we put on the stack above */
- lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
- }
- else
- return luaL_error(L, "%s does not implement the torch.bernoulli() function", tname);
-
- return lua_gettop(L);
-}
-
-static int torch_ByteTensor_squeeze(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THByteTensor *arg1 = NULL;
-int arg1_idx = 0;
-THByteTensor *arg2 = NULL;
-THByteTensor *arg3 = NULL;
-int arg3_idx = 0;
-THByteTensor *arg4 = NULL;
-long arg5 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor"))
-)
-{
-argset = 1;
-arg1 = THByteTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg3 = THByteTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor | [*ByteTensor*] ByteTensor index", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.ByteTensor");
-THByteTensor_squeeze(arg1,arg2);
-if(arg1->nDimension == 1 && arg1->size[0] == 1)
-lua_pushnumber(L, (lua_Number)(*THByteTensor_data(arg1)));
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.ByteTensor");
-{int hasdims = arg4->nDimension > 1;
-THByteTensor_squeeze1d(arg3,arg4,arg5);
-if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1)
-lua_pushnumber(L, (lua_Number)(*THByteTensor_data(arg3)));}
-return 1;
-}
-return 0;
-}
-
-static int torch_squeeze(lua_State *L)
-{
- int narg = lua_gettop(L);
- const void *tname;
- if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
- {
- }
- else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
- {
- }
- else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
- {
- }
- else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
- && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
- {
- lua_remove(L, -2);
- }
- else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
- luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
-
- lua_pushstring(L, "squeeze");
- lua_rawget(L, -2);
- if(lua_isfunction(L, -1))
- {
- lua_insert(L, 1);
- lua_pop(L, 2); /* the two tables we put on the stack above */
- lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
- }
- else
- return luaL_error(L, "%s does not implement the torch.squeeze() function", tname);
-
- return lua_gettop(L);
-}
-
-static int torch_ByteTensor_sign(lua_State *L)
-{
-int narg = lua_gettop(L);
-THByteTensor *arg1 = NULL;
-int arg1_idx = 0;
-THByteTensor *arg2 = NULL;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor"))
-)
-{
-arg1 = THByteTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor"))
-)
-{
-arg1_idx = 1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.ByteTensor");
-THByteTensor_sign(arg1,arg2);
-return 1;
-}
-
-static int torch_sign(lua_State *L)
-{
- int narg = lua_gettop(L);
- const void *tname;
- if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
- {
- }
- else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
- {
- }
- else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
- {
- }
- else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
- && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
- {
- lua_remove(L, -2);
- }
- else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
- luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
-
- lua_pushstring(L, "sign");
- lua_rawget(L, -2);
- if(lua_isfunction(L, -1))
- {
- lua_insert(L, 1);
- lua_pop(L, 2); /* the two tables we put on the stack above */
- lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
- }
- else
- return luaL_error(L, "%s does not implement the torch.sign() function", tname);
-
- return lua_gettop(L);
-}
-
-static int torch_ByteTensor_conv2(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THByteTensor *arg1 = NULL;
-int arg1_idx = 0;
-unsigned char arg2 = 0;
-unsigned char arg3 = 1;
-THByteTensor *arg4 = NULL;
-THByteTensor *arg5 = NULL;
-unsigned char arg6 = 1;
-unsigned char arg7 = 1;
-const char *arg8 = NULL;
-char arg8_default = 'V';
-const char *arg9 = NULL;
-char arg9_default = 'C';
-THByteTensor *arg10 = NULL;
-int arg10_idx = 0;
-unsigned char arg11 = 0;
-unsigned char arg12 = 1;
-THByteTensor *arg13 = NULL;
-THByteTensor *arg14 = NULL;
-unsigned char arg15 = 1;
-unsigned char arg16 = 1;
-const char *arg17 = NULL;
-char arg17_default = 'V';
-const char *arg18 = NULL;
-char arg18_default = 'C';
-THByteTensor *arg19 = NULL;
-int arg19_idx = 0;
-unsigned char arg20 = 0;
-unsigned char arg21 = 1;
-THByteTensor *arg22 = NULL;
-THByteTensor *arg23 = NULL;
-unsigned char arg24 = 1;
-unsigned char arg25 = 1;
-const char *arg26 = NULL;
-char arg26_default = 'V';
-const char *arg27 = NULL;
-char arg27_default = 'C';
-if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2)
-)
-{
-argset = 1;
-arg1 = THByteTensor_new();
-arg8 = &arg8_default;
-arg9 = &arg9_default;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg8 = &arg8_default;
-arg9 = &arg9_default;
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2)
-&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F')
-)
-{
-argset = 1;
-arg1 = THByteTensor_new();
-arg9 = &arg9_default;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2)
-&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F')
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg9 = &arg9_default;
-}
-else if(narg == 2
-&& (arg13 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg13->nDimension == 3)
-&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 3)
-)
-{
-argset = 2;
-arg10 = THByteTensor_new();
-arg17 = &arg17_default;
-arg18 = &arg18_default;
-}
-else if(narg == 3
-&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg13 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg13->nDimension == 3)
-&& (arg14 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg14->nDimension == 3)
-)
-{
-argset = 2;
-arg10_idx = 1;
-arg17 = &arg17_default;
-arg18 = &arg18_default;
-}
-else if(narg == 3
-&& (arg13 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg13->nDimension == 3)
-&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 3)
-&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F')
-)
-{
-argset = 2;
-arg10 = THByteTensor_new();
-arg18 = &arg18_default;
-}
-else if(narg == 4
-&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg13 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg13->nDimension == 3)
-&& (arg14 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg14->nDimension == 3)
-&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F')
-)
-{
-argset = 2;
-arg10_idx = 1;
-arg18 = &arg18_default;
-}
-else if(narg == 2
-&& (arg22 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg22->nDimension == 3)
-&& (arg23 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg23->nDimension == 4)
-)
-{
-argset = 3;
-arg19 = THByteTensor_new();
-arg26 = &arg26_default;
-arg27 = &arg27_default;
-}
-else if(narg == 3
-&& (arg19 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg22 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg22->nDimension == 3)
-&& (arg23 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg23->nDimension == 4)
-)
-{
-argset = 3;
-arg19_idx = 1;
-arg26 = &arg26_default;
-arg27 = &arg27_default;
-}
-else if(narg == 3
-&& (arg22 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg22->nDimension == 3)
-&& (arg23 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg23->nDimension == 4)
-&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F')
-)
-{
-argset = 3;
-arg19 = THByteTensor_new();
-arg27 = &arg27_default;
-}
-else if(narg == 4
-&& (arg19 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg22 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg22->nDimension == 3)
-&& (arg23 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg23->nDimension == 4)
-&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F')
-)
-{
-argset = 3;
-arg19_idx = 1;
-arg27 = &arg27_default;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~2D ByteTensor~2D [(V|F)] | [*ByteTensor*] ByteTensor~3D ByteTensor~3D [(V|F)] | [*ByteTensor*] ByteTensor~3D ByteTensor~4D [(V|F)]", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.ByteTensor");
-THByteTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg10_idx)
-lua_pushvalue(L, arg10_idx);
-else
-luaT_pushudata(L, arg10, "torch.ByteTensor");
-THByteTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18);
-return 1;
-}
-else if(argset == 3)
-{
-if(arg19_idx)
-lua_pushvalue(L, arg19_idx);
-else
-luaT_pushudata(L, arg19, "torch.ByteTensor");
-THByteTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27);
-return 1;
-}
-return 0;
-}
-
-static int torch_conv2(lua_State *L)
-{
- int narg = lua_gettop(L);
- const void *tname;
- if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
- {
- }
- else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
- {
- }
- else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
- {
- }
- else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
- && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
- {
- lua_remove(L, -2);
- }
- else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
- luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
-
- lua_pushstring(L, "conv2");
- lua_rawget(L, -2);
- if(lua_isfunction(L, -1))
- {
- lua_insert(L, 1);
- lua_pop(L, 2); /* the two tables we put on the stack above */
- lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
- }
- else
- return luaL_error(L, "%s does not implement the torch.conv2() function", tname);
-
- return lua_gettop(L);
-}
-
-static int torch_ByteTensor_xcorr2(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THByteTensor *arg1 = NULL;
-int arg1_idx = 0;
-unsigned char arg2 = 0;
-unsigned char arg3 = 1;
-THByteTensor *arg4 = NULL;
-THByteTensor *arg5 = NULL;
-unsigned char arg6 = 1;
-unsigned char arg7 = 1;
-const char *arg8 = NULL;
-char arg8_default = 'V';
-const char *arg9 = NULL;
-char arg9_default = 'X';
-THByteTensor *arg10 = NULL;
-int arg10_idx = 0;
-unsigned char arg11 = 0;
-unsigned char arg12 = 1;
-THByteTensor *arg13 = NULL;
-THByteTensor *arg14 = NULL;
-unsigned char arg15 = 1;
-unsigned char arg16 = 1;
-const char *arg17 = NULL;
-char arg17_default = 'V';
-const char *arg18 = NULL;
-char arg18_default = 'X';
-THByteTensor *arg19 = NULL;
-int arg19_idx = 0;
-unsigned char arg20 = 0;
-unsigned char arg21 = 1;
-THByteTensor *arg22 = NULL;
-THByteTensor *arg23 = NULL;
-unsigned char arg24 = 1;
-unsigned char arg25 = 1;
-const char *arg26 = NULL;
-char arg26_default = 'V';
-const char *arg27 = NULL;
-char arg27_default = 'X';
-if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2)
-)
-{
-argset = 1;
-arg1 = THByteTensor_new();
-arg8 = &arg8_default;
-arg9 = &arg9_default;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg8 = &arg8_default;
-arg9 = &arg9_default;
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2)
-&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F')
-)
-{
-argset = 1;
-arg1 = THByteTensor_new();
-arg9 = &arg9_default;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2)
-&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F')
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg9 = &arg9_default;
-}
-else if(narg == 2
-&& (arg13 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg13->nDimension == 3)
-&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 3)
-)
-{
-argset = 2;
-arg10 = THByteTensor_new();
-arg17 = &arg17_default;
-arg18 = &arg18_default;
-}
-else if(narg == 3
-&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg13 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg13->nDimension == 3)
-&& (arg14 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg14->nDimension == 3)
-)
-{
-argset = 2;
-arg10_idx = 1;
-arg17 = &arg17_default;
-arg18 = &arg18_default;
-}
-else if(narg == 3
-&& (arg13 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg13->nDimension == 3)
-&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 3)
-&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F')
-)
-{
-argset = 2;
-arg10 = THByteTensor_new();
-arg18 = &arg18_default;
-}
-else if(narg == 4
-&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg13 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg13->nDimension == 3)
-&& (arg14 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg14->nDimension == 3)
-&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F')
-)
-{
-argset = 2;
-arg10_idx = 1;
-arg18 = &arg18_default;
-}
-else if(narg == 2
-&& (arg22 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg22->nDimension == 3)
-&& (arg23 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg23->nDimension == 4)
-)
-{
-argset = 3;
-arg19 = THByteTensor_new();
-arg26 = &arg26_default;
-arg27 = &arg27_default;
-}
-else if(narg == 3
-&& (arg19 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg22 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg22->nDimension == 3)
-&& (arg23 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg23->nDimension == 4)
-)
-{
-argset = 3;
-arg19_idx = 1;
-arg26 = &arg26_default;
-arg27 = &arg27_default;
-}
-else if(narg == 3
-&& (arg22 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg22->nDimension == 3)
-&& (arg23 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg23->nDimension == 4)
-&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F')
-)
-{
-argset = 3;
-arg19 = THByteTensor_new();
-arg27 = &arg27_default;
-}
-else if(narg == 4
-&& (arg19 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg22 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg22->nDimension == 3)
-&& (arg23 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg23->nDimension == 4)
-&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F')
-)
-{
-argset = 3;
-arg19_idx = 1;
-arg27 = &arg27_default;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~2D ByteTensor~2D [(V|F)] | [*ByteTensor*] ByteTensor~3D ByteTensor~3D [(V|F)] | [*ByteTensor*] ByteTensor~3D ByteTensor~4D [(V|F)]", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.ByteTensor");
-THByteTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg10_idx)
-lua_pushvalue(L, arg10_idx);
-else
-luaT_pushudata(L, arg10, "torch.ByteTensor");
-THByteTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18);
-return 1;
-}
-else if(argset == 3)
-{
-if(arg19_idx)
-lua_pushvalue(L, arg19_idx);
-else
-luaT_pushudata(L, arg19, "torch.ByteTensor");
-THByteTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27);
-return 1;
-}
-return 0;
-}
-
-static int torch_xcorr2(lua_State *L)
-{
- int narg = lua_gettop(L);
- const void *tname;
- if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
- {
- }
- else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
- {
- }
- else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
- {
- }
- else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
- && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
- {
- lua_remove(L, -2);
- }
- else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
- luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
-
- lua_pushstring(L, "xcorr2");
- lua_rawget(L, -2);
- if(lua_isfunction(L, -1))
- {
- lua_insert(L, 1);
- lua_pop(L, 2); /* the two tables we put on the stack above */
- lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
- }
- else
- return luaL_error(L, "%s does not implement the torch.xcorr2() function", tname);
-
- return lua_gettop(L);
-}
-
-static int torch_ByteTensor_conv3(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THByteTensor *arg1 = NULL;
-int arg1_idx = 0;
-unsigned char arg2 = 0;
-unsigned char arg3 = 1;
-THByteTensor *arg4 = NULL;
-THByteTensor *arg5 = NULL;
-unsigned char arg6 = 1;
-unsigned char arg7 = 1;
-unsigned char arg8 = 1;
-const char *arg9 = NULL;
-char arg9_default = 'V';
-const char *arg10 = NULL;
-char arg10_default = 'C';
-THByteTensor *arg11 = NULL;
-int arg11_idx = 0;
-unsigned char arg12 = 0;
-unsigned char arg13 = 1;
-THByteTensor *arg14 = NULL;
-THByteTensor *arg15 = NULL;
-unsigned char arg16 = 1;
-unsigned char arg17 = 1;
-unsigned char arg18 = 1;
-const char *arg19 = NULL;
-char arg19_default = 'V';
-const char *arg20 = NULL;
-char arg20_default = 'C';
-THByteTensor *arg21 = NULL;
-int arg21_idx = 0;
-unsigned char arg22 = 0;
-unsigned char arg23 = 1;
-THByteTensor *arg24 = NULL;
-THByteTensor *arg25 = NULL;
-unsigned char arg26 = 1;
-unsigned char arg27 = 1;
-unsigned char arg28 = 1;
-const char *arg29 = NULL;
-char arg29_default = 'V';
-const char *arg30 = NULL;
-char arg30_default = 'C';
-if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3)
-)
-{
-argset = 1;
-arg1 = THByteTensor_new();
-arg9 = &arg9_default;
-arg10 = &arg10_default;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg9 = &arg9_default;
-arg10 = &arg10_default;
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3)
-&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F')
-)
-{
-argset = 1;
-arg1 = THByteTensor_new();
-arg10 = &arg10_default;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3)
-&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F')
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg10 = &arg10_default;
-}
-else if(narg == 2
-&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg14->nDimension == 4)
-&& (arg15 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg15->nDimension == 4)
-)
-{
-argset = 2;
-arg11 = THByteTensor_new();
-arg19 = &arg19_default;
-arg20 = &arg20_default;
-}
-else if(narg == 3
-&& (arg11 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 4)
-&& (arg15 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg15->nDimension == 4)
-)
-{
-argset = 2;
-arg11_idx = 1;
-arg19 = &arg19_default;
-arg20 = &arg20_default;
-}
-else if(narg == 3
-&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg14->nDimension == 4)
-&& (arg15 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg15->nDimension == 4)
-&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F')
-)
-{
-argset = 2;
-arg11 = THByteTensor_new();
-arg20 = &arg20_default;
-}
-else if(narg == 4
-&& (arg11 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 4)
-&& (arg15 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg15->nDimension == 4)
-&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F')
-)
-{
-argset = 2;
-arg11_idx = 1;
-arg20 = &arg20_default;
-}
-else if(narg == 2
-&& (arg24 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg24->nDimension == 4)
-&& (arg25 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg25->nDimension == 5)
-)
-{
-argset = 3;
-arg21 = THByteTensor_new();
-arg29 = &arg29_default;
-arg30 = &arg30_default;
-}
-else if(narg == 3
-&& (arg21 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg24 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg24->nDimension == 4)
-&& (arg25 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg25->nDimension == 5)
-)
-{
-argset = 3;
-arg21_idx = 1;
-arg29 = &arg29_default;
-arg30 = &arg30_default;
-}
-else if(narg == 3
-&& (arg24 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg24->nDimension == 4)
-&& (arg25 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg25->nDimension == 5)
-&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F')
-)
-{
-argset = 3;
-arg21 = THByteTensor_new();
-arg30 = &arg30_default;
-}
-else if(narg == 4
-&& (arg21 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg24 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg24->nDimension == 4)
-&& (arg25 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg25->nDimension == 5)
-&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F')
-)
-{
-argset = 3;
-arg21_idx = 1;
-arg30 = &arg30_default;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~3D ByteTensor~3D [(V|F)] | [*ByteTensor*] ByteTensor~4D ByteTensor~4D [(V|F)] | [*ByteTensor*] ByteTensor~4D ByteTensor~5D [(V|F)]", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.ByteTensor");
-THByteTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg11_idx)
-lua_pushvalue(L, arg11_idx);
-else
-luaT_pushudata(L, arg11, "torch.ByteTensor");
-THByteTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20);
-return 1;
-}
-else if(argset == 3)
-{
-if(arg21_idx)
-lua_pushvalue(L, arg21_idx);
-else
-luaT_pushudata(L, arg21, "torch.ByteTensor");
-THByteTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30);
-return 1;
-}
-return 0;
-}
-
-static int torch_conv3(lua_State *L)
-{
- int narg = lua_gettop(L);
- const void *tname;
- if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
- {
- }
- else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
- {
- }
- else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
- {
- }
- else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
- && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
- {
- lua_remove(L, -2);
- }
- else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
- luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
-
- lua_pushstring(L, "conv3");
- lua_rawget(L, -2);
- if(lua_isfunction(L, -1))
- {
- lua_insert(L, 1);
- lua_pop(L, 2); /* the two tables we put on the stack above */
- lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
- }
- else
- return luaL_error(L, "%s does not implement the torch.conv3() function", tname);
-
- return lua_gettop(L);
-}
-
-static int torch_ByteTensor_xcorr3(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THByteTensor *arg1 = NULL;
-int arg1_idx = 0;
-unsigned char arg2 = 0;
-unsigned char arg3 = 1;
-THByteTensor *arg4 = NULL;
-THByteTensor *arg5 = NULL;
-unsigned char arg6 = 1;
-unsigned char arg7 = 1;
-unsigned char arg8 = 1;
-const char *arg9 = NULL;
-char arg9_default = 'V';
-const char *arg10 = NULL;
-char arg10_default = 'X';
-THByteTensor *arg11 = NULL;
-int arg11_idx = 0;
-unsigned char arg12 = 0;
-unsigned char arg13 = 1;
-THByteTensor *arg14 = NULL;
-THByteTensor *arg15 = NULL;
-unsigned char arg16 = 1;
-unsigned char arg17 = 1;
-unsigned char arg18 = 1;
-const char *arg19 = NULL;
-char arg19_default = 'V';
-const char *arg20 = NULL;
-char arg20_default = 'X';
-THByteTensor *arg21 = NULL;
-int arg21_idx = 0;
-unsigned char arg22 = 0;
-unsigned char arg23 = 1;
-THByteTensor *arg24 = NULL;
-THByteTensor *arg25 = NULL;
-unsigned char arg26 = 1;
-unsigned char arg27 = 1;
-unsigned char arg28 = 1;
-const char *arg29 = NULL;
-char arg29_default = 'V';
-const char *arg30 = NULL;
-char arg30_default = 'X';
-if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3)
-)
-{
-argset = 1;
-arg1 = THByteTensor_new();
-arg9 = &arg9_default;
-arg10 = &arg10_default;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg9 = &arg9_default;
-arg10 = &arg10_default;
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3)
-&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F')
-)
-{
-argset = 1;
-arg1 = THByteTensor_new();
-arg10 = &arg10_default;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3)
-&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F')
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg10 = &arg10_default;
-}
-else if(narg == 2
-&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg14->nDimension == 4)
-&& (arg15 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg15->nDimension == 4)
-)
-{
-argset = 2;
-arg11 = THByteTensor_new();
-arg19 = &arg19_default;
-arg20 = &arg20_default;
-}
-else if(narg == 3
-&& (arg11 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 4)
-&& (arg15 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg15->nDimension == 4)
-)
-{
-argset = 2;
-arg11_idx = 1;
-arg19 = &arg19_default;
-arg20 = &arg20_default;
-}
-else if(narg == 3
-&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg14->nDimension == 4)
-&& (arg15 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg15->nDimension == 4)
-&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F')
-)
-{
-argset = 2;
-arg11 = THByteTensor_new();
-arg20 = &arg20_default;
-}
-else if(narg == 4
-&& (arg11 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 4)
-&& (arg15 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg15->nDimension == 4)
-&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F')
-)
-{
-argset = 2;
-arg11_idx = 1;
-arg20 = &arg20_default;
-}
-else if(narg == 2
-&& (arg24 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg24->nDimension == 4)
-&& (arg25 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg25->nDimension == 5)
-)
-{
-argset = 3;
-arg21 = THByteTensor_new();
-arg29 = &arg29_default;
-arg30 = &arg30_default;
-}
-else if(narg == 3
-&& (arg21 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg24 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg24->nDimension == 4)
-&& (arg25 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg25->nDimension == 5)
-)
-{
-argset = 3;
-arg21_idx = 1;
-arg29 = &arg29_default;
-arg30 = &arg30_default;
-}
-else if(narg == 3
-&& (arg24 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg24->nDimension == 4)
-&& (arg25 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg25->nDimension == 5)
-&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F')
-)
-{
-argset = 3;
-arg21 = THByteTensor_new();
-arg30 = &arg30_default;
-}
-else if(narg == 4
-&& (arg21 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg24 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg24->nDimension == 4)
-&& (arg25 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg25->nDimension == 5)
-&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F')
-)
-{
-argset = 3;
-arg21_idx = 1;
-arg30 = &arg30_default;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~3D ByteTensor~3D [(V|F)] | [*ByteTensor*] ByteTensor~4D ByteTensor~4D [(V|F)] | [*ByteTensor*] ByteTensor~4D ByteTensor~5D [(V|F)]", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.ByteTensor");
-THByteTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg11_idx)
-lua_pushvalue(L, arg11_idx);
-else
-luaT_pushudata(L, arg11, "torch.ByteTensor");
-THByteTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20);
-return 1;
-}
-else if(argset == 3)
-{
-if(arg21_idx)
-lua_pushvalue(L, arg21_idx);
-else
-luaT_pushudata(L, arg21, "torch.ByteTensor");
-THByteTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30);
-return 1;
-}
-return 0;
-}
-
-static int torch_xcorr3(lua_State *L)
-{
- int narg = lua_gettop(L);
- const void *tname;
- if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
- {
- }
- else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
- {
- }
- else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
- {
- }
- else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
- && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
- {
- lua_remove(L, -2);
- }
- else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
- luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
-
- lua_pushstring(L, "xcorr3");
- lua_rawget(L, -2);
- if(lua_isfunction(L, -1))
- {
- lua_insert(L, 1);
- lua_pop(L, 2); /* the two tables we put on the stack above */
- lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
- }
- else
- return luaL_error(L, "%s does not implement the torch.xcorr3() function", tname);
-
- return lua_gettop(L);
-}
-
-static int torch_ByteTensor_lt(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THByteTensor *arg1 = NULL;
-int arg1_idx = 0;
-THByteTensor *arg2 = NULL;
-unsigned char arg3 = 0;
-THByteTensor *arg4 = NULL;
-int arg4_idx = 0;
-THByteTensor *arg5 = NULL;
-unsigned char arg6 = 0;
-THByteTensor *arg7 = NULL;
-int arg7_idx = 0;
-THByteTensor *arg8 = NULL;
-THByteTensor *arg9 = NULL;
-THByteTensor *arg10 = NULL;
-int arg10_idx = 0;
-THByteTensor *arg11 = NULL;
-THByteTensor *arg12 = NULL;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 1;
-arg3 = (unsigned char)lua_tonumber(L, 2);
-arg1 = THByteTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg3 = (unsigned char)lua_tonumber(L, 3);
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (unsigned char)lua_tonumber(L, 3);
-}
-else if(narg == 2
-&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor"))
-)
-{
-argset = 3;
-arg7 = THByteTensor_new();
-}
-else if(narg == 3
-&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor"))
-)
-{
-argset = 3;
-arg7_idx = 1;
-}
-else if(narg == 3
-&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor"))
-)
-{
-argset = 4;
-arg10_idx = 1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.ByteTensor");
-THByteTensor_ltValue(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-lua_pushvalue(L, arg4_idx);
-THByteTensor_ltValueT(arg4,arg5,arg6);
-return 1;
-}
-else if(argset == 3)
-{
-if(arg7_idx)
-lua_pushvalue(L, arg7_idx);
-else
-luaT_pushudata(L, arg7, "torch.ByteTensor");
-THByteTensor_ltTensor(arg7,arg8,arg9);
-return 1;
-}
-else if(argset == 4)
-{
-lua_pushvalue(L, arg10_idx);
-THByteTensor_ltTensorT(arg10,arg11,arg12);
-return 1;
-}
-return 0;
-}
-
-static int torch_lt(lua_State *L)
-{
- int narg = lua_gettop(L);
- const void *tname;
- if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
- {
- }
- else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
- {
- }
- else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
- {
- }
- else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
- && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
- {
- lua_remove(L, -2);
- }
- else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
- luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
-
- lua_pushstring(L, "lt");
- lua_rawget(L, -2);
- if(lua_isfunction(L, -1))
- {
- lua_insert(L, 1);
- lua_pop(L, 2); /* the two tables we put on the stack above */
- lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
- }
- else
- return luaL_error(L, "%s does not implement the torch.lt() function", tname);
-
- return lua_gettop(L);
-}
-
-static int torch_ByteTensor_gt(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THByteTensor *arg1 = NULL;
-int arg1_idx = 0;
-THByteTensor *arg2 = NULL;
-unsigned char arg3 = 0;
-THByteTensor *arg4 = NULL;
-int arg4_idx = 0;
-THByteTensor *arg5 = NULL;
-unsigned char arg6 = 0;
-THByteTensor *arg7 = NULL;
-int arg7_idx = 0;
-THByteTensor *arg8 = NULL;
-THByteTensor *arg9 = NULL;
-THByteTensor *arg10 = NULL;
-int arg10_idx = 0;
-THByteTensor *arg11 = NULL;
-THByteTensor *arg12 = NULL;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 1;
-arg3 = (unsigned char)lua_tonumber(L, 2);
-arg1 = THByteTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg3 = (unsigned char)lua_tonumber(L, 3);
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (unsigned char)lua_tonumber(L, 3);
-}
-else if(narg == 2
-&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor"))
-)
-{
-argset = 3;
-arg7 = THByteTensor_new();
-}
-else if(narg == 3
-&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor"))
-)
-{
-argset = 3;
-arg7_idx = 1;
-}
-else if(narg == 3
-&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor"))
-)
-{
-argset = 4;
-arg10_idx = 1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.ByteTensor");
-THByteTensor_gtValue(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-lua_pushvalue(L, arg4_idx);
-THByteTensor_gtValueT(arg4,arg5,arg6);
-return 1;
-}
-else if(argset == 3)
-{
-if(arg7_idx)
-lua_pushvalue(L, arg7_idx);
-else
-luaT_pushudata(L, arg7, "torch.ByteTensor");
-THByteTensor_gtTensor(arg7,arg8,arg9);
-return 1;
-}
-else if(argset == 4)
-{
-lua_pushvalue(L, arg10_idx);
-THByteTensor_gtTensorT(arg10,arg11,arg12);
-return 1;
-}
-return 0;
-}
-
-static int torch_gt(lua_State *L)
-{
- int narg = lua_gettop(L);
- const void *tname;
- if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */
- {
- }
- else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */
- {
- }
- else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */
- {
- }
- else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
- && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */
- {
- lua_remove(L, -2);
- }
- else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
- luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");
-
- lua_pushstring(L, "gt");
- lua_rawget(L, -2);
- if(lua_isfunction(L, -1))
- {
- lua_insert(L, 1);
- lua_pop(L, 2); /* the two tables we put on the stack above */
- lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
- }
- else
- return luaL_error(L, "%s does not implement the torch.gt() function", tname);
-
- return lua_gettop(L);
-}
-
-static int torch_ByteTensor_le(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THByteTensor *arg1 = NULL;
-int arg1_idx = 0;
-THByteTensor *arg2 = NULL;
-unsigned char arg3 = 0;
-THByteTensor *arg4 = NULL;
-int arg4_idx = 0;
-THByteTensor *arg5 = NULL;
-unsigned char arg6 = 0;
-THByteTensor *arg7 = NULL;
-int arg7_idx = 0;
-THByteTensor *arg8 = NULL;
-THByteTensor *arg9 = NULL;
-THByteTensor *arg10 = NULL;
-int arg10_idx = 0;
-THByteTensor *arg11 = NULL;
-THByteTensor *arg12 = NULL;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 1;
-arg3 = (unsigned char)lua_tonumber(L, 2);
-arg1 = THByteTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg3 = (unsigned char)lua_tonumber(L, 3);
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (unsigned char)lua_tonumber(L, 3);
-}
-else if(narg == 2
-&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor"))
-)
-{
-argset = 3;
-arg7 = THByteTensor_new();
-}
-else if(narg == 3
-&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor"))
-)
-{
-argset = 3;
-arg7_idx = 1;
-}
-else if(narg == 3
-&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor"))
-&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor"))
-&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor"))
-)
-{
-argset = 4;
-arg10_idx = 1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.ByteTensor");
-THByteTensor_leValue(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-lua_pushvalue(L, arg4_idx);
-THByteTensor_leValueT(arg4,arg5,arg6);
-return 1;
-}
-else if(argset == 3)
-{
-if(arg7_idx)
-lua_pushvalue(L, arg7_idx);
-else
-luaT_pushudata(L, arg7, "torch.ByteTensor");
-THByteTensor_leTensor(arg7,arg8,arg9);
-return 1;
-}
-else if(argset == 4)
-{
-lua_pushvalue(L, arg10_idx);
-THByteTensor_leTensorT(arg10,arg11,arg12);
-return 1;
-}
-return 0;
-}
-
-static int torch_le(lua_State *L)
-{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "le"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.le() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THByteTensor *arg8 = NULL; -THByteTensor *arg9 = NULL; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THByteTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else 
if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_ge(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "ge"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.ge() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THByteTensor *arg8 = NULL; -THByteTensor *arg9 = NULL; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ 
-if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THByteTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_eq(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "eq"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.eq() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THByteTensor *arg8 = NULL; -THByteTensor *arg9 = NULL; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); 
-THByteTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THByteTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_ne(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "ne"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.ne() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THByteTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int torch_nonzero(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "nonzero"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.nonzero() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_all(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor", type_buf); -} -arg2 = THByteTensor_logicalall(arg1); -lua_pushboolean(L, arg2); -return 1; -} - -static int torch_all(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "all"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.all() function", tname); - - return lua_gettop(L); -} - -static int torch_ByteTensor_any(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor", type_buf); -} -arg2 = THByteTensor_logicalany(arg1); -lua_pushboolean(L, arg2); -return 1; -} - -static int torch_any(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "any"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.any() function", tname); - - return lua_gettop(L); -} - -#ifndef _CWRAP_STR_ARG_TYPES_4821726c1947cdf3eebacade98173939 -#define _CWRAP_STR_ARG_TYPES_4821726c1947cdf3eebacade98173939 -#include "string.h" -static void str_arg_types(lua_State *L, char *buf, int n) { - int i; - int nargs = lua_gettop(L); - if (nargs == 0) { - snprintf(buf, n, "no arguments provided"); - return; - } - for (i = 1; i <= nargs; i++) { - int l; - const char *torch_type = luaT_typename(L, i); - if(torch_type && !strncmp(torch_type, "torch.", 6)) torch_type += 6; - if (torch_type) l = snprintf(buf, n, "%s ", torch_type); - else if(lua_isnil(L, i)) l = snprintf(buf, n, "%s ", "nil"); - else if(lua_isboolean(L, i)) l = snprintf(buf, n, "%s ", "boolean"); - else if(lua_isnumber(L, i)) l = snprintf(buf, n, "%s ", "number"); - else if(lua_isstring(L, i)) l = snprintf(buf, n, "%s ", "string"); - else if(lua_istable(L, i)) l = snprintf(buf, n, "%s ", "table"); - else if(lua_isuserdata(L, i)) l = snprintf(buf, n, "%s ", "userdata"); - else l = snprintf(buf, n, "%s ", "???"); - if (l >= n) return; - buf += l; - n -= l; - } -} -#endif -static int m_torch_ByteTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_zero(arg1); -return 1; -} - -static int m_torch_ByteTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (unsigned char)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_fill(arg1,arg2); -return 1; -} - -static int m_torch_ByteTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_ByteTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 
512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_ByteTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int m_torch_ByteTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THByteTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_ByteTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THByteTensor *arg4 = NULL; -THByteTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -unsigned char arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* index LongTensor ByteTensor | *ByteTensor* index LongTensor unsigned char", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THByteTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ 
-lua_pushvalue(L, arg5_idx); -THByteTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -THByteTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor ByteTensor", type_buf); -} -arg3 = THByteTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int m_torch_ByteTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -THByteTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor ByteTensor", type_buf); -} -arg3 = THByteTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int m_torch_ByteTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 1; -THByteTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char | *ByteTensor* [ByteTensor] [unsigned char] ByteTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THByteTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 1; 
-THByteTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char | *ByteTensor* [ByteTensor] [unsigned char] ByteTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THByteTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_div(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int 
arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else 
if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -unsigned char arg4 = 0; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg4 = (unsigned char)lua_tonumber(L, 3); -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) 
-) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -arg4 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] unsigned char unsigned char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_ByteTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* ByteTensor ByteTensor [unsigned char]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_ByteTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid 
arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = 
NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 1; -THByteTensor *arg4 = NULL; -THByteTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = 
luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] [unsigned char] ByteTensor ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_ByteTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 1; -THByteTensor *arg4 = NULL; -THByteTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor] [unsigned char] ByteTensor ByteTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_ByteTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* ByteTensor~2D ByteTensor~1D", type_buf); -} -THByteTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THByteTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_ByteTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); 
-THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* ByteTensor~2D ByteTensor~2D", type_buf); -} -THByteTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THByteTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_ByteTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* ByteTensor~3D ByteTensor~3D", type_buf); -} -THByteTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THByteTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_ByteTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* ByteTensor~1D ByteTensor~1D", type_buf); -} -THByteTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THByteTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_ByteTensor_addmv(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -unsigned char arg8 = 0; -THByteTensor *arg9 = NULL; -unsigned char arg10 = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 
-&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (unsigned char)lua_tonumber(L, 2); -arg10 = (unsigned char)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg9->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.ByteTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (unsigned char)lua_tonumber(L, 2); -arg10 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor~1D* [ByteTensor~1D] [unsigned char] ByteTensor~2D ByteTensor~1D | *ByteTensor~1D* unsigned char [ByteTensor~1D] unsigned char ByteTensor~2D ByteTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THByteTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THByteTensor_addmv(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_addmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -unsigned char arg8 = 0; -THByteTensor *arg9 = NULL; -unsigned char arg10 = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& 
lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (unsigned char)lua_tonumber(L, 2); -arg10 = (unsigned char)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.ByteTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (unsigned char)lua_tonumber(L, 2); -arg10 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor~2D* [ByteTensor~2D] [unsigned char] ByteTensor~2D ByteTensor~2D | *ByteTensor~2D* unsigned char [ByteTensor~2D] unsigned char ByteTensor~2D ByteTensor~2D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THByteTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THByteTensor_addmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -unsigned char arg8 = 0; -THByteTensor *arg9 = NULL; -unsigned char arg10 = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 
1) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (unsigned char)lua_tonumber(L, 2); -arg10 = (unsigned char)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 6, "torch.ByteTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (unsigned char)lua_tonumber(L, 2); -arg10 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor~2D* [ByteTensor~2D] [unsigned char] ByteTensor~1D ByteTensor~1D | *ByteTensor~2D* unsigned char [ByteTensor~2D] unsigned char ByteTensor~1D ByteTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THByteTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THByteTensor_addr(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_addbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -unsigned char arg8 = 0; -THByteTensor *arg9 = NULL; -unsigned char arg10 = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -argset 
= 1; -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (unsigned char)lua_tonumber(L, 2); -arg10 = (unsigned char)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.ByteTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (unsigned char)lua_tonumber(L, 2); -arg10 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor~2D* [ByteTensor~2D] [unsigned char] ByteTensor~3D ByteTensor~3D | *ByteTensor~2D* unsigned char [ByteTensor~2D] unsigned char ByteTensor~3D ByteTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THByteTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THByteTensor_addbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_baddbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 1; -THByteTensor *arg3 = NULL; -unsigned char arg4 = 1; -THByteTensor *arg5 = NULL; -THByteTensor *arg6 = NULL; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -unsigned char arg8 = 0; -THByteTensor *arg9 = NULL; -unsigned char arg10 = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 2); -arg3 = arg1; -} -else 
if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.ByteTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (unsigned char)lua_tonumber(L, 2); -arg10 = (unsigned char)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg9->nDimension == 3) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.ByteTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.ByteTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (unsigned char)lua_tonumber(L, 2); -arg10 = (unsigned char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor~3D* [ByteTensor~3D] [unsigned char] ByteTensor~3D ByteTensor~3D | *ByteTensor~3D* unsigned char [ByteTensor~3D] unsigned char ByteTensor~3D ByteTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THByteTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THByteTensor_baddbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_numel(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -ptrdiff_t arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor", type_buf); -} -arg2 = THByteTensor_numel(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_ByteTensor_cumsum(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cumsum(arg1,arg2,arg3); -return 1; -} - -static int 
m_torch_ByteTensor_cumprod(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cumprod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_sum(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -long arg2 = 0; -THByteTensor *arg3 = NULL; -int arg3_idx = 0; -THByteTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor | [*ByteTensor*] ByteTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THByteTensor_sumall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ByteTensor"); -THByteTensor_sum(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_prod(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -long arg2 = 0; -THByteTensor *arg3 = NULL; -int arg3_idx = 0; -THByteTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor | [*ByteTensor*] ByteTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THByteTensor_prodall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ByteTensor"); -THByteTensor_prod(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int 
m_torch_ByteTensor_min(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -unsigned char arg2 = 0; -THByteTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THByteTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor | [*ByteTensor*] [*LongTensor*] ByteTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THByteTensor_minall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ByteTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THByteTensor_min(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_ByteTensor_max(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -unsigned char arg2 = 0; -THByteTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THByteTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor | [*ByteTensor*] [*LongTensor*] ByteTensor 
index", type_buf); -} -if(argset == 1) -{ -arg2 = THByteTensor_maxall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ByteTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THByteTensor_max(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_ByteTensor_cmin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (unsigned char)lua_tonumber(L, 1); -arg4 = THByteTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (unsigned char)lua_tonumber(L, 2); -arg4 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [ByteTensor] ByteTensor | [*ByteTensor*] [ByteTensor] unsigned char", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cmin(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ByteTensor"); -THByteTensor_cminValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_cmax(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -} -else 
if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (unsigned char)lua_tonumber(L, 1); -arg4 = THByteTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (unsigned char)lua_tonumber(L, 2); -arg4 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [ByteTensor] ByteTensor | [*ByteTensor*] [ByteTensor] unsigned char", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cmax(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ByteTensor"); -THByteTensor_cmaxValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_trace(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -long arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor", type_buf); -} -arg2 = THByteTensor_trace(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_ByteTensor_cross(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -long arg4 = -1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cross(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_ByteTensor_diag(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 
1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_diag(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_eye(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_eye(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_range(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -long arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* long long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_range(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_ByteTensor_randperm(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [Generator] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_randperm(arg1,arg2,arg3); - -THByteTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_ByteTensor_sort(lua_State *L) -{ -int narg = 
lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THByteTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg2_idx = 1; -arg1 = THByteTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isboolean(L, 2) -) -{ -arg5 = lua_toboolean(L, 2); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg5 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg5 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = lua_toboolean(L, 4); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg5 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg2 = 
THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [*LongTensor*] ByteTensor [index] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THByteTensor_sort(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_ByteTensor_topk(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THByteTensor *arg3 = NULL; -long arg4 = 1; -long arg5 = 0; -int arg6 = 0; -int arg7 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg2_idx = 1; -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& 
(arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = 
THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg1 = THByteTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isboolean(L, 2) -) -{ -arg7 = lua_toboolean(L, 2); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) 
-&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg7 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg7 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg7 = lua_toboolean(L, 4); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg7 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& 
(arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THByteTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, 
"torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THByteTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -} -else if(narg == 5 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg1 = THByteTensor_new(); -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -&& lua_isboolean(L, 7) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -arg7 = lua_toboolean(L, 7); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [*LongTensor*] ByteTensor [long] [index] [boolean] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, 
"torch.ByteTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THByteTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_ByteTensor_kthvalue(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THByteTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THByteTensor_new(); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [*LongTensor*] ByteTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THByteTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_ByteTensor_mode(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THByteTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -arg2 = 
THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg2_idx = 1; -arg1 = THByteTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [*LongTensor*] ByteTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THByteTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_ByteTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THByteTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg2_idx = 1; -arg1 = THByteTensor_new(); -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THByteTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THByteTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, 
"torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] [*LongTensor*] ByteTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THByteTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_ByteTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_tril(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_triu(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_triu(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ByteTensor_cat(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -long arg4 = -2; -THByteTensor 
*arg5 = NULL; -int arg5_idx = 0; -THByteTensor **arg6_data = NULL; -long arg6_size = 0; -int arg6_i = 0; -long arg7 = -2; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THByteTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ByteTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 1 -&& torch_isnonemptytable(L, 1) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THByteTensor**)THAlloc(arg6_size * sizeof(THByteTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ByteTensor"))) - luaL_error(L, "expected ByteTensor in tensor array"); - lua_pop(L, 1); -} - -arg5 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THByteTensor**)THAlloc(arg6_size * sizeof(THByteTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ByteTensor"))) - luaL_error(L, "expected ByteTensor in tensor array"); - lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THByteTensor**)THAlloc(arg6_size * sizeof(THByteTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ByteTensor"))) - luaL_error(L, "expected ByteTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THByteTensor**)THAlloc(arg6_size * sizeof(THByteTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ByteTensor"))) - luaL_error(L, "expected ByteTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor ByteTensor [index] | [*ByteTensor*] {ByteTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); 
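/* Return the destination: re-push the caller's tensor if one was supplied, otherwise transfer ownership of the newly allocated tensor to Lua. */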
-else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.ByteTensor"); -THByteTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THByteTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THByteTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
[Generator] long long | [Generator] long | [Generator] | *ByteTensor* [Generator] long long | *ByteTensor* [Generator] long | *ByteTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THByteTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THByteTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *ByteTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 2; -arg4_idx = 1; 
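/* No explicit generator argument in this overload: fetch the global default generator torch._gen. */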
-lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *ByteTensor* [Generator] [double] | *ByteTensor* [Generator] FloatTensor | *ByteTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THByteTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -THByteTensor *arg3 = NULL; -int arg3_idx = 0; -THByteTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
[*ByteTensor*] ByteTensor | [*ByteTensor*] ByteTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THByteTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ByteTensor"); -{int hasdims = arg4->nDimension > 1; -THByteTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THByteTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ByteTensor* [ByteTensor]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THByteTensor_sign(arg1,arg2); -return 1; -} - -static int m_torch_ByteTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -unsigned char arg3 = 1; -THByteTensor *arg4 = NULL; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 1; -unsigned char arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -unsigned char arg11 = 0; -unsigned char arg12 = 1; -THByteTensor *arg13 = NULL; -THByteTensor *arg14 = NULL; -unsigned char arg15 = 1; -unsigned char arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THByteTensor *arg19 = NULL; -int arg19_idx = 0; -unsigned char arg20 = 0; -unsigned char arg21 = 1; -THByteTensor *arg22 = NULL; -THByteTensor *arg23 = NULL; -unsigned char arg24 = 1; -unsigned char arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THByteTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && 
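/* The optional option string is validated inline: 'V' selects a valid (shrinking) convolution, 'F' a full one. */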
(*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THByteTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THByteTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THByteTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THByteTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~2D ByteTensor~2D [(V|F)] | [*ByteTensor*] ByteTensor~3D ByteTensor~3D [(V|F)] | [*ByteTensor*] ByteTensor~3D ByteTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.ByteTensor"); -THByteTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.ByteTensor"); 
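/* argset 3 (3D input, 4D kernel bank) maps to the matrix-vector convolution kernel. */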
-THByteTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -unsigned char arg3 = 1; -THByteTensor *arg4 = NULL; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 1; -unsigned char arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -unsigned char arg11 = 0; -unsigned char arg12 = 1; -THByteTensor *arg13 = NULL; -THByteTensor *arg14 = NULL; -unsigned char arg15 = 1; -unsigned char arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THByteTensor *arg19 = NULL; -int arg19_idx = 0; -unsigned char arg20 = 0; -unsigned char arg21 = 1; -THByteTensor *arg22 = NULL; -THByteTensor *arg23 = NULL; -unsigned char arg24 = 1; -unsigned char arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THByteTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THByteTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THByteTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ByteTensor")) && 
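/* Tensor rank is checked inline as part of overload matching, so a shape mismatch falls through to the usage error below. */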
(arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THByteTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THByteTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~2D ByteTensor~2D [(V|F)] | [*ByteTensor*] ByteTensor~3D ByteTensor~3D [(V|F)] | [*ByteTensor*] ByteTensor~3D ByteTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.ByteTensor"); -THByteTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.ByteTensor"); -THByteTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -unsigned char arg3 = 1; -THByteTensor *arg4 = NULL; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 1; -unsigned char arg7 = 1; -unsigned char arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THByteTensor *arg11 = NULL; -int arg11_idx = 0; -unsigned char arg12 = 0; -unsigned char arg13 = 1; -THByteTensor *arg14 = NULL; -THByteTensor *arg15 = NULL; -unsigned char arg16 = 1; -unsigned char arg17 = 1; -unsigned char arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THByteTensor *arg21 = NULL; -int arg21_idx = 0; -unsigned char arg22 = 0; -unsigned char arg23 = 1; -THByteTensor *arg24 = NULL; -THByteTensor *arg25 = NULL; -unsigned char arg26 = 1; -unsigned char arg27 = 1; -unsigned char arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = 
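/* conv3 repeats the conv2 pattern one rank higher: 3D/3D, 4D/4D and 4D/5D inputs dispatch to the Dmul, Dcmul and Dmv variants. */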
luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THByteTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THByteTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THByteTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THByteTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THByteTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg24 = 
luaT_toudata(L, 2, "torch.ByteTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~3D ByteTensor~3D [(V|F)] | [*ByteTensor*] ByteTensor~4D ByteTensor~4D [(V|F)] | [*ByteTensor*] ByteTensor~4D ByteTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.ByteTensor"); -THByteTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.ByteTensor"); -THByteTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -unsigned char arg2 = 0; -unsigned char arg3 = 1; -THByteTensor *arg4 = NULL; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 1; -unsigned char arg7 = 1; -unsigned char arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THByteTensor *arg11 = NULL; -int arg11_idx = 0; -unsigned char arg12 = 0; -unsigned char arg13 = 1; -THByteTensor *arg14 = NULL; -THByteTensor *arg15 = NULL; -unsigned char arg16 = 1; -unsigned char arg17 = 1; -unsigned char arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THByteTensor *arg21 = NULL; -int arg21_idx = 0; -unsigned char arg22 = 0; -unsigned char arg23 = 1; -THByteTensor *arg24 = NULL; -THByteTensor *arg25 = NULL; -unsigned char arg26 = 1; -unsigned char arg27 = 1; -unsigned char arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THByteTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THByteTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || 
*arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THByteTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THByteTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THByteTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.ByteTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THByteTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.ByteTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ByteTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor~3D ByteTensor~3D [(V|F)] | [*ByteTensor*] ByteTensor~4D ByteTensor~4D [(V|F)] | [*ByteTensor*] ByteTensor~4D ByteTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.ByteTensor"); -THByteTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.ByteTensor"); 
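/* xcorr3 reuses the conv3D kernels; the 'X' default option requests cross-correlation (no kernel flip) where conv3 passes 'C'. */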
-THByteTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THByteTensor *arg8 = NULL; -THByteTensor *arg9 = NULL; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THByteTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THByteTensor *arg8 = NULL; -THByteTensor *arg9 = NULL; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& 
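/* gt(dst, src, value): explicit-destination form of the scalar comparison; writes 0/1 bytes into dst. */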
lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THByteTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THByteTensor *arg8 = NULL; -THByteTensor *arg9 = NULL; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char 
type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THByteTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THByteTensor *arg8 = NULL; -THByteTensor *arg9 = NULL; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THByteTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); 
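/* eq/ne use the same four-way dispatch as lt/gt/le/ge: scalar or tensor comparand, each with an allocating and an in-place (ValueT/TensorT) variant. */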
-int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THByteTensor *arg8 = NULL; -THByteTensor *arg9 = NULL; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THByteTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -unsigned char arg3 = 0; -THByteTensor *arg4 = NULL; -int arg4_idx = 0; -THByteTensor *arg5 = NULL; -unsigned char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THByteTensor *arg8 = NULL; -THByteTensor *arg9 = NULL; -THByteTensor *arg10 = NULL; -int arg10_idx = 0; -THByteTensor *arg11 = NULL; -THByteTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (unsigned char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg5 = luaT_toudata(L, 2, 
"torch.ByteTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (unsigned char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ByteTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ByteTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ByteTensor unsigned char | *ByteTensor* ByteTensor unsigned char | [*ByteTensor*] ByteTensor ByteTensor | *ByteTensor* ByteTensor ByteTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THByteTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THByteTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THByteTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THByteTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ByteTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THByteTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] ByteTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THByteTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_ByteTensor_all(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor", type_buf); -} -arg2 = THByteTensor_logicalall(arg1); -lua_pushboolean(L, arg2); -return 1; -} - -static int m_torch_ByteTensor_any(lua_State *L) -{ -int narg = lua_gettop(L); -THByteTensor *arg1 = NULL; -int arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ByteTensor", type_buf); -} -arg2 = THByteTensor_logicalany(arg1); -lua_pushboolean(L, arg2); -return 1; -} - -static const struct luaL_Reg m_torch_ByteTensorMath__ [] = { -{"zero", m_torch_ByteTensor_zero}, -{"fill", m_torch_ByteTensor_fill}, -{"zeros", m_torch_ByteTensor_zeros}, -{"ones", m_torch_ByteTensor_ones}, -{"reshape", m_torch_ByteTensor_reshape}, -{"gather", m_torch_ByteTensor_gather}, -{"scatter", 
m_torch_ByteTensor_scatter},
-{"dot", m_torch_ByteTensor_dot},
-{"equal", m_torch_ByteTensor_equal},
-{"add", m_torch_ByteTensor_add},
-{"csub", m_torch_ByteTensor_csub},
-{"mul", m_torch_ByteTensor_mul},
-{"div", m_torch_ByteTensor_div},
-{"lshift", m_torch_ByteTensor_lshift},
-{"rshift", m_torch_ByteTensor_rshift},
-{"fmod", m_torch_ByteTensor_fmod},
-{"remainder", m_torch_ByteTensor_remainder},
-{"bitand", m_torch_ByteTensor_bitand},
-{"bitor", m_torch_ByteTensor_bitor},
-{"bitxor", m_torch_ByteTensor_bitxor},
-{"mod", m_torch_ByteTensor_mod},
-{"clamp", m_torch_ByteTensor_clamp},
-{"match", m_torch_ByteTensor_match},
-{"cmul", m_torch_ByteTensor_cmul},
-{"cpow", m_torch_ByteTensor_cpow},
-{"cdiv", m_torch_ByteTensor_cdiv},
-{"clshift", m_torch_ByteTensor_clshift},
-{"crshift", m_torch_ByteTensor_crshift},
-{"cfmod", m_torch_ByteTensor_cfmod},
-{"cremainder", m_torch_ByteTensor_cremainder},
-{"cbitand", m_torch_ByteTensor_cbitand},
-{"cbitor", m_torch_ByteTensor_cbitor},
-{"cbitxor", m_torch_ByteTensor_cbitxor},
-{"cmod", m_torch_ByteTensor_cmod},
-{"addcmul", m_torch_ByteTensor_addcmul},
-{"addcdiv", m_torch_ByteTensor_addcdiv},
-{"mv", m_torch_ByteTensor_mv},
-{"mm", m_torch_ByteTensor_mm},
-{"bmm", m_torch_ByteTensor_bmm},
-{"ger", m_torch_ByteTensor_ger},
-{"addmv", m_torch_ByteTensor_addmv},
-{"addmm", m_torch_ByteTensor_addmm},
-{"addr", m_torch_ByteTensor_addr},
-{"addbmm", m_torch_ByteTensor_addbmm},
-{"baddbmm", m_torch_ByteTensor_baddbmm},
-{"numel", m_torch_ByteTensor_numel},
-{"cumsum", m_torch_ByteTensor_cumsum},
-{"cumprod", m_torch_ByteTensor_cumprod},
-{"sum", m_torch_ByteTensor_sum},
-{"prod", m_torch_ByteTensor_prod},
-{"min", m_torch_ByteTensor_min},
-{"max", m_torch_ByteTensor_max},
-{"cmin", m_torch_ByteTensor_cmin},
-{"cmax", m_torch_ByteTensor_cmax},
-{"trace", m_torch_ByteTensor_trace},
-{"cross", m_torch_ByteTensor_cross},
-{"diag", m_torch_ByteTensor_diag},
-{"eye", m_torch_ByteTensor_eye},
-{"range", m_torch_ByteTensor_range},
-{"randperm", m_torch_ByteTensor_randperm},
-{"sort", m_torch_ByteTensor_sort},
-{"topk", m_torch_ByteTensor_topk},
-{"kthvalue", m_torch_ByteTensor_kthvalue},
-{"mode", m_torch_ByteTensor_mode},
-{"median", m_torch_ByteTensor_median},
-{"tril", m_torch_ByteTensor_tril},
-{"triu", m_torch_ByteTensor_triu},
-{"cat", m_torch_ByteTensor_cat},
-{"random", m_torch_ByteTensor_random},
-{"geometric", m_torch_ByteTensor_geometric},
-{"bernoulli", m_torch_ByteTensor_bernoulli},
-{"squeeze", m_torch_ByteTensor_squeeze},
-{"sign", m_torch_ByteTensor_sign},
-{"conv2", m_torch_ByteTensor_conv2},
-{"xcorr2", m_torch_ByteTensor_xcorr2},
-{"conv3", m_torch_ByteTensor_conv3},
-{"xcorr3", m_torch_ByteTensor_xcorr3},
-{"lt", m_torch_ByteTensor_lt},
-{"gt", m_torch_ByteTensor_gt},
-{"le", m_torch_ByteTensor_le},
-{"ge", m_torch_ByteTensor_ge},
-{"eq", m_torch_ByteTensor_eq},
-{"ne", m_torch_ByteTensor_ne},
-{"nonzero", m_torch_ByteTensor_nonzero},
-{"all", m_torch_ByteTensor_all},
-{"any", m_torch_ByteTensor_any},
-{NULL, NULL}
-};
-
-static const struct luaL_Reg torch_ByteTensorMath__ [] = {
-{"zero", torch_ByteTensor_zero},
-{"fill", torch_ByteTensor_fill},
-{"zeros", torch_ByteTensor_zeros},
-{"ones", torch_ByteTensor_ones},
-{"reshape", torch_ByteTensor_reshape},
-{"gather", torch_ByteTensor_gather},
-{"scatter", torch_ByteTensor_scatter},
-{"dot", torch_ByteTensor_dot},
-{"equal", torch_ByteTensor_equal},
-{"add", torch_ByteTensor_add},
-{"csub", torch_ByteTensor_csub},
-{"mul", torch_ByteTensor_mul},
-{"div", torch_ByteTensor_div},
-{"lshift", torch_ByteTensor_lshift},
-{"rshift", torch_ByteTensor_rshift},
-{"fmod", torch_ByteTensor_fmod},
-{"remainder", torch_ByteTensor_remainder},
-{"bitand", torch_ByteTensor_bitand},
-{"bitor", torch_ByteTensor_bitor},
-{"bitxor", torch_ByteTensor_bitxor},
-{"mod", torch_ByteTensor_mod},
-{"clamp", torch_ByteTensor_clamp},
-{"match", torch_ByteTensor_match},
-{"cmul", torch_ByteTensor_cmul},
-{"cpow", torch_ByteTensor_cpow},
-{"cdiv", torch_ByteTensor_cdiv},
-{"clshift", torch_ByteTensor_clshift},
-{"crshift", torch_ByteTensor_crshift},
-{"cfmod", torch_ByteTensor_cfmod},
-{"cremainder", torch_ByteTensor_cremainder},
-{"cbitand", torch_ByteTensor_cbitand},
-{"cbitor", torch_ByteTensor_cbitor},
-{"cbitxor", torch_ByteTensor_cbitxor},
-{"cmod", torch_ByteTensor_cmod},
-{"addcmul", torch_ByteTensor_addcmul},
-{"addcdiv", torch_ByteTensor_addcdiv},
-{"mv", torch_ByteTensor_mv},
-{"mm", torch_ByteTensor_mm},
-{"bmm", torch_ByteTensor_bmm},
-{"ger", torch_ByteTensor_ger},
-{"addmv", torch_ByteTensor_addmv},
-{"addmm", torch_ByteTensor_addmm},
-{"addr", torch_ByteTensor_addr},
-{"addbmm", torch_ByteTensor_addbmm},
-{"baddbmm", torch_ByteTensor_baddbmm},
-{"numel", torch_ByteTensor_numel},
-{"cumsum", torch_ByteTensor_cumsum},
-{"cumprod", torch_ByteTensor_cumprod},
-{"sum", torch_ByteTensor_sum},
-{"prod", torch_ByteTensor_prod},
-{"min", torch_ByteTensor_min},
-{"max", torch_ByteTensor_max},
-{"cmin", torch_ByteTensor_cmin},
-{"cmax", torch_ByteTensor_cmax},
-{"trace", torch_ByteTensor_trace},
-{"cross", torch_ByteTensor_cross},
-{"diag", torch_ByteTensor_diag},
-{"eye", torch_ByteTensor_eye},
-{"range", torch_ByteTensor_range},
-{"randperm", torch_ByteTensor_randperm},
-{"sort", torch_ByteTensor_sort},
-{"topk", torch_ByteTensor_topk},
-{"kthvalue", torch_ByteTensor_kthvalue},
-{"mode", torch_ByteTensor_mode},
-{"median", torch_ByteTensor_median},
-{"tril", torch_ByteTensor_tril},
-{"triu", torch_ByteTensor_triu},
-{"cat", torch_ByteTensor_cat},
-{"random", torch_ByteTensor_random},
-{"geometric", torch_ByteTensor_geometric},
-{"bernoulli", torch_ByteTensor_bernoulli},
-{"squeeze", torch_ByteTensor_squeeze},
-{"sign", torch_ByteTensor_sign},
-{"conv2", torch_ByteTensor_conv2},
-{"xcorr2", torch_ByteTensor_xcorr2},
-{"conv3", torch_ByteTensor_conv3},
-{"xcorr3", torch_ByteTensor_xcorr3},
-{"lt", torch_ByteTensor_lt},
-{"gt", torch_ByteTensor_gt},
-{"le", torch_ByteTensor_le},
-{"ge", torch_ByteTensor_ge},
-{"eq", torch_ByteTensor_eq},
-{"ne", torch_ByteTensor_ne},
-{"nonzero", torch_ByteTensor_nonzero},
-{"all", torch_ByteTensor_all},
-{"any", torch_ByteTensor_any},
-{NULL, NULL}
-};
-
-static void torch_ByteTensorMath_init(lua_State *L)
-{
- luaT_pushmetatable(L, "torch.ByteTensor");
-
- /* register methods */
- luaT_setfuncs(L, m_torch_ByteTensorMath__, 0);
-
- /* register functions into the "torch" field of the tensor metaclass */
- lua_pushstring(L, "torch");
- lua_newtable(L);
- luaT_setfuncs(L, torch_ByteTensorMath__, 0);
- lua_rawset(L, -3);
- lua_pop(L, 1);
-}
-
-static int torch_CharTensor_zero(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor*", type_buf);
-}
-lua_pushvalue(L, arg1_idx);
-THCharTensor_zero(arg1);
-return 1;
-}
-
-static int torch_CharTensor_fill(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 =
NULL; -int arg1_idx = 0; -char arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (char)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_fill(arg1,arg2); -return 1; -} - -static int torch_CharTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THCharTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_CharTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THCharTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_CharTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int torch_CharTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THCharTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THCharTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = 
luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_CharTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -char arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* index LongTensor CharTensor | *CharTensor* index LongTensor char", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THCharTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THCharTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_CharTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -THCharTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor CharTensor", type_buf); -} -arg3 = THCharTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int torch_CharTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -THCharTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor CharTensor", type_buf); -} -arg3 = THCharTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int torch_CharTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 1; -THCharTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = 
(char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 2; -arg4 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 2; -arg6 = (char)lua_tonumber(L, 2); -arg4 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char | [*CharTensor*] CharTensor [char] CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.CharTensor"); -THCharTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_CharTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 1; -THCharTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 2; -arg4 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 2; -arg6 = (char)lua_tonumber(L, 2); -arg4 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char | [*CharTensor*] CharTensor [char] CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, 
"torch.CharTensor"); -THCharTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_CharTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_div(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_fmod(lua_State *L) -{ -int 
narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg2 
= luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -char arg4 = 0; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg4 = (char)lua_tonumber(L, 3); -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -arg4 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor char char", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_CharTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -char arg4 = 1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (char)lua_tonumber(L, 3); -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; 
-str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor [char]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_CharTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
[*CharTensor*] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); 
-else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_CharTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 4, 
"torch.CharTensor")) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [char] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_CharTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -arg3 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [char] CharTensor CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_CharTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THCharTensor_new(); -THCharTensor_resize1d(arg1, arg5->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~2D CharTensor~1D", type_buf); -} -THCharTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int 
torch_CharTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THCharTensor_new(); -THCharTensor_resize2d(arg1, arg5->size[0], arg6->size[1]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~2D CharTensor~2D", type_buf); -} -THCharTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_CharTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THCharTensor_new(); -THCharTensor_resize3d(arg1, arg5->size[0], arg5->size[1], arg6->size[2]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~3D CharTensor~3D", type_buf); -} -THCharTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_CharTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 1; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THCharTensor_new(); -THCharTensor_resize2d(arg1, arg5->size[0], arg6->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~1D CharTensor~1D", type_buf); -} -THCharTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_CharTensor_addmv(lua_State *L) -{ -int narg = 
lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 1; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (char)lua_tonumber(L, 1); -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (char)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg4 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (char)lua_tonumber(L, 1); -arg4 = (char)lua_tonumber(L, 3); -arg1 = THCharTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 6, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (char)lua_tonumber(L, 2); -arg4 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [char] CharTensor~1D [char] CharTensor~2D CharTensor~1D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_CharTensor_addmm(lua_State *L) -{ 
-int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 1; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -arg2 = (char)lua_tonumber(L, 1); -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg2 = (char)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -arg4 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -arg2 = (char)lua_tonumber(L, 1); -arg4 = (char)lua_tonumber(L, 3); -arg1 = THCharTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 6, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg2 = (char)lua_tonumber(L, 2); -arg4 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [char] CharTensor~2D [char] CharTensor~2D CharTensor~2D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int 
torch_CharTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 1; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (char)lua_tonumber(L, 1); -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (char)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg4 = (char)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (char)lua_tonumber(L, 1); -arg4 = (char)lua_tonumber(L, 3); -arg1 = THCharTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 6, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (char)lua_tonumber(L, 2); -arg4 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [char] CharTensor~2D [char] CharTensor~1D CharTensor~1D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - 
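Every wrapper in this generated file follows the same cwrap dispatch shape seen above: count the Lua stack arguments with lua_gettop(), probe each accepted overload with luaT_toudata()/lua_isnumber() to select an argset, allocate a fresh result tensor via TH*Tensor_new() when the caller did not pass a destination, then push the result (lua_pushvalue() for a caller-supplied destination, luaT_pushudata() for a newly allocated one) and call the matching TH kernel. A minimal sketch of that shape, using hypothetical names (THFooTensor, THFooTensor_op, my_foo_op) rather than anything from the deleted file:

/* Hedged sketch of the cwrap dispatch pattern; all "Foo" names are
 * hypothetical stand-ins, not part of the deleted code. */
#include <lua.h>
#include <lauxlib.h>
#include "luaT.h"

typedef struct THFooTensor THFooTensor;    /* opaque tensor type (assumed) */
extern THFooTensor *THFooTensor_new(void); /* allocate an empty tensor */
extern void THFooTensor_op(THFooTensor *dst, THFooTensor *src, double value);

static int my_foo_op(lua_State *L)
{
  int narg = lua_gettop(L);
  THFooTensor *dst = NULL, *src = NULL;
  int dst_idx = 0;  /* stack slot of a caller-supplied destination, if any */
  double value = 0;

  /* Overload 1: (src, value) -- allocate the destination ourselves. */
  if (narg == 2
      && (src = luaT_toudata(L, 1, "torch.FooTensor"))
      && lua_isnumber(L, 2)) {
    value = lua_tonumber(L, 2);
    dst = THFooTensor_new();
  }
  /* Overload 2: (dst, src, value) -- write into the caller's tensor. */
  else if (narg == 3
           && (dst = luaT_toudata(L, 1, "torch.FooTensor"))
           && (src = luaT_toudata(L, 2, "torch.FooTensor"))
           && lua_isnumber(L, 3)) {
    dst_idx = 1;
    value = lua_tonumber(L, 3);
  }
  else {
    return luaL_error(L, "invalid arguments: expected [*FooTensor*] FooTensor number");
  }

  if (dst_idx)
    lua_pushvalue(L, dst_idx);                 /* return the caller's tensor */
  else
    luaT_pushudata(L, dst, "torch.FooTensor"); /* hand the new tensor to the GC */
  THFooTensor_op(dst, src, value);
  return 1;
}

Repeating this boilerplate for every tensor type and every math method is what makes the generated file, and hence this deletion, so large.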
-static int torch_CharTensor_addbmm(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-char arg2 = 1;
-THCharTensor *arg3 = NULL;
-char arg4 = 1;
-THCharTensor *arg5 = NULL;
-THCharTensor *arg6 = NULL;
-if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1 = THCharTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 4
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (char)lua_tonumber(L, 1);
-arg1 = THCharTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (char)lua_tonumber(L, 2);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg4 = (char)lua_tonumber(L, 2);
-arg1 = THCharTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg4 = (char)lua_tonumber(L, 3);
-}
-else if(narg == 5
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (char)lua_tonumber(L, 1);
-arg4 = (char)lua_tonumber(L, 3);
-arg1 = THCharTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 4)
-&& (arg5 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 6, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (char)lua_tonumber(L, 2);
-arg4 = (char)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [char] CharTensor~2D [char] CharTensor~3D CharTensor~3D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_CharTensor_baddbmm(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-char arg2 = 1;
-THCharTensor *arg3 = NULL;
-char arg4 = 1;
-THCharTensor *arg5 = NULL;
-THCharTensor *arg6 = NULL;
-if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1 = THCharTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 4
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (char)lua_tonumber(L, 1);
-arg1 = THCharTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (char)lua_tonumber(L, 2);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg4 = (char)lua_tonumber(L, 2);
-arg1 = THCharTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg4 = (char)lua_tonumber(L, 3);
-}
-else if(narg == 5
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (char)lua_tonumber(L, 1);
-arg4 = (char)lua_tonumber(L, 3);
-arg1 = THCharTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 4)
-&& (arg5 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 6, "torch.CharTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (char)lua_tonumber(L, 2);
-arg4 = (char)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [char] CharTensor~3D [char] CharTensor~3D CharTensor~3D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_CharTensor_numel(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-ptrdiff_t arg2 = 0;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor", type_buf);
-}
-arg2 = THCharTensor_numel(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-
-static int torch_CharTensor_cumsum(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THCharTensor *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-arg1 = THCharTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2)-1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_cumsum(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_CharTensor_cumprod(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THCharTensor *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-arg1 = THCharTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2)-1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_cumprod(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_CharTensor_sum(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THCharTensor *arg1 = NULL;
-long arg2 = 0;
-THCharTensor *arg3 = NULL;
-int arg3_idx = 0;
-THCharTensor *arg4 = NULL;
-long arg5 = 0;
-int arg6 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg3 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor | [*CharTensor*] CharTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THCharTensor_sumall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.CharTensor");
-THCharTensor_sum(arg3,arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_CharTensor_prod(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THCharTensor *arg1 = NULL;
-long arg2 = 0;
-THCharTensor *arg3 = NULL;
-int arg3_idx = 0;
-THCharTensor *arg4 = NULL;
-long arg5 = 0;
-int arg6 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg3 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor | [*CharTensor*] CharTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THCharTensor_prodall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.CharTensor");
-THCharTensor_prod(arg3,arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_CharTensor_min(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THCharTensor *arg1 = NULL;
-char arg2 = 0;
-THCharTensor *arg3 = NULL;
-int arg3_idx = 0;
-THLongTensor *arg4 = NULL;
-int arg4_idx = 0;
-THCharTensor *arg5 = NULL;
-long arg6 = 0;
-int arg7 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (long)lua_tonumber(L, 2)-1;
-arg3 = THCharTensor_new();
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg3 = THCharTensor_new();
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg4_idx = 2;
-arg6 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor | [*CharTensor*] [*LongTensor*] CharTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THCharTensor_minall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.CharTensor");
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.LongTensor");
-THCharTensor_min(arg3,arg4,arg5,arg6,arg7);
-THLongTensor_add(arg4, arg4, 1);
-return 2;
-}
-return 0;
-}
-
-static int torch_CharTensor_max(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THCharTensor *arg1 = NULL;
-char arg2 = 0;
-THCharTensor *arg3 = NULL;
-int arg3_idx = 0;
-THLongTensor *arg4 = NULL;
-int arg4_idx = 0;
-THCharTensor *arg5 = NULL;
-long arg6 = 0;
-int arg7 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (long)lua_tonumber(L, 2)-1;
-arg3 = THCharTensor_new();
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg3 = THCharTensor_new();
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg4_idx = 2;
-arg6 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor | [*CharTensor*] [*LongTensor*] CharTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THCharTensor_maxall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.CharTensor");
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.LongTensor");
-THCharTensor_max(arg3,arg4,arg5,arg6,arg7);
-THLongTensor_add(arg4, arg4, 1);
-return 2;
-}
-return 0;
-}
-
-static int torch_CharTensor_cmin(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THCharTensor *arg2 = NULL;
-THCharTensor *arg3 = NULL;
-THCharTensor *arg4 = NULL;
-int arg4_idx = 0;
-THCharTensor *arg5 = NULL;
-char arg6 = 0;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-argset = 1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (char)lua_tonumber(L, 2);
-arg4 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (char)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor | [*CharTensor*] CharTensor char", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_cmin(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.CharTensor");
-THCharTensor_cminValue(arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_CharTensor_cmax(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THCharTensor *arg2 = NULL;
-THCharTensor *arg3 = NULL;
-THCharTensor *arg4 = NULL;
-int arg4_idx = 0;
-THCharTensor *arg5 = NULL;
-char arg6 = 0;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-argset = 1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (char)lua_tonumber(L, 2);
-arg4 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (char)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor | [*CharTensor*] CharTensor char", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_cmax(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.CharTensor");
-THCharTensor_cmaxValue(arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_CharTensor_trace(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-long arg2 = 0;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor", type_buf);
-}
-arg2 = THCharTensor_trace(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-
-static int torch_CharTensor_cross(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THCharTensor *arg2 = NULL;
-THCharTensor *arg3 = NULL;
-long arg4 = -1;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_cross(arg1,arg2,arg3,arg4);
-return 1;
-}
-
-static int torch_CharTensor_diag(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THCharTensor *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-arg1 = THCharTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [long]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_diag(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_CharTensor_eye(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-long arg2 = 0;
-long arg3 = 0;
-if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-arg2 = (long)lua_tonumber(L, 1);
-arg1 = THCharTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg1_idx = 1;
-arg2 = (long)lua_tonumber(L, 2);
-}
-else if(narg == 2
-&& lua_isnumber(L, 1)
-&& lua_isnumber(L, 2)
-)
-{
-arg2 = (long)lua_tonumber(L, 1);
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg2 = (long)lua_tonumber(L, 2);
-arg3 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] long [long]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_eye(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_CharTensor_range(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-long arg2 = 0;
-long arg3 = 0;
-long arg4 = 1;
-if(narg == 2
-&& lua_isnumber(L, 1)
-&& lua_isnumber(L, 2)
-)
-{
-arg2 = (long)lua_tonumber(L, 1);
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg2 = (long)lua_tonumber(L, 2);
-arg3 = (long)lua_tonumber(L, 3);
-}
-else if(narg == 3
-&& lua_isnumber(L, 1)
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg2 = (long)lua_tonumber(L, 1);
-arg3 = (long)lua_tonumber(L, 2);
-arg4 = (long)lua_tonumber(L, 3);
-arg1 = THCharTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2 = (long)lua_tonumber(L, 2);
-arg3 = (long)lua_tonumber(L, 3);
-arg4 = (long)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] long long [long]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_range(arg1,arg2,arg3,arg4);
-return 1;
-}
-
-static int torch_CharTensor_randperm(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THGenerator *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-arg3 = (long)lua_tonumber(L, 1);
-arg1 = THCharTensor_new();
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 2);
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, torch_Generator))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, torch_Generator))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [Generator] long", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_randperm(arg1,arg2,arg3);
-
-THCharTensor_add(arg1, arg1, 1);
-return 1;
-}
-
-static int torch_CharTensor_sort(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-int arg2_idx = 0;
-THCharTensor *arg3 = NULL;
-long arg4 = 0;
-int arg5 = 0;
-if(narg == 1
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-arg2 = THLongTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg2_idx = 1;
-arg1 = THCharTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (long)lua_tonumber(L, 2)-1;
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isboolean(L, 2)
-)
-{
-arg5 = lua_toboolean(L, 2);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg1_idx = 1;
-arg5 = lua_toboolean(L, 3);
-arg2 = THLongTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg2_idx = 1;
-arg5 = lua_toboolean(L, 3);
-arg1 = THCharTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = lua_toboolean(L, 4);
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2)-1;
-arg5 = lua_toboolean(L, 3);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg5 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg5 = lua_toboolean(L, 4);
-arg1 = THCharTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4)-1;
-arg5 = lua_toboolean(L, 5);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [*LongTensor*] CharTensor [index] [boolean]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-if(arg2_idx)
-lua_pushvalue(L, arg2_idx);
-else
-luaT_pushudata(L, arg2, "torch.LongTensor");
-THCharTensor_sort(arg1,arg2,arg3,arg4,arg5);
-THLongTensor_add(arg2, arg2, 1);
-return 2;
-}
-
-static int torch_CharTensor_topk(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-int arg2_idx = 0;
-THCharTensor *arg3 = NULL;
-long arg4 = 1;
-long arg5 = 0;
-int arg6 = 0;
-int arg7 = 0;
-if(narg == 1
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg2_idx = 1;
-arg1 = THCharTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg1 = THCharTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = (long)lua_tonumber(L, 4)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isboolean(L, 2)
-)
-{
-arg6 = lua_toboolean(L, 2);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg1_idx = 1;
-arg6 = lua_toboolean(L, 3);
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg2_idx = 1;
-arg6 = lua_toboolean(L, 3);
-arg1 = THCharTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg6 = lua_toboolean(L, 4);
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg6 = lua_toboolean(L, 3);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg6 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg6 = lua_toboolean(L, 4);
-arg1 = THCharTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg6 = lua_toboolean(L, 5);
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg6 = lua_toboolean(L, 3);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg1 = THCharTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg1 = THCharTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-arg6 = lua_toboolean(L, 6);
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isboolean(L, 2)
-)
-{
-arg7 = lua_toboolean(L, 2);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg1_idx = 1;
-arg7 = lua_toboolean(L, 3);
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg2_idx = 1;
-arg7 = lua_toboolean(L, 3);
-arg1 = THCharTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg7 = lua_toboolean(L, 4);
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg7 = lua_toboolean(L, 3);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg1 = THCharTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg7 = lua_toboolean(L, 3);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg7 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg7 = lua_toboolean(L, 4);
-arg1 = THCharTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg7 = lua_toboolean(L, 5);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg7 = lua_toboolean(L, 4);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg7 = lua_toboolean(L, 5);
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg7 = lua_toboolean(L, 5);
-arg1 = THCharTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-arg7 = lua_toboolean(L, 6);
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isboolean(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg6 = lua_toboolean(L, 2);
-arg7 = lua_toboolean(L, 3);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isboolean(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg6 = lua_toboolean(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isboolean(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg6 = lua_toboolean(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg1 = THCharTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg6 = lua_toboolean(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 5
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg1 = THCharTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg6 = lua_toboolean(L, 5);
-arg7 = lua_toboolean(L, 6);
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg6 = lua_toboolean(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg1 = THCharTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg7 = lua_toboolean(L, 6);
-}
-else if(narg == 5
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg7 = lua_toboolean(L, 6);
-arg2 = THLongTensor_new();
-}
-else if(narg == 6
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg7 = lua_toboolean(L, 6);
-arg1 = THCharTensor_new();
-}
-else if(narg == 7
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-&& lua_isboolean(L, 6)
-&& lua_isboolean(L, 7)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-arg6 = lua_toboolean(L, 6);
-arg7 = lua_toboolean(L, 7);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [*LongTensor*] CharTensor [long] [index] [boolean] [boolean]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-if(arg2_idx)
-lua_pushvalue(L, arg2_idx);
-else
-luaT_pushudata(L, arg2, "torch.LongTensor");
-THCharTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7);
-THLongTensor_add(arg2, arg2, 1);
-return 2;
-}
-
-static int torch_CharTensor_kthvalue(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-int arg2_idx = 0;
-THCharTensor *arg3 = NULL;
-long arg4 = 0;
-long arg5 = 0;
-int arg6 = 1;
-if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg2 = THLongTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg1 = THCharTensor_new();
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [*LongTensor*] CharTensor long [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-if(arg2_idx)
-lua_pushvalue(L, arg2_idx);
-else
-luaT_pushudata(L, arg2, "torch.LongTensor");
-THCharTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6);
-THLongTensor_add(arg2, arg2, 1);
-return 2;
-}
-
-static int torch_CharTensor_mode(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-int arg2_idx = 0;
-THCharTensor *arg3 = NULL;
-long arg4 = 0;
-int arg5 = 1;
-if(narg == 1
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-arg2 = THLongTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg2_idx = 1;
-arg1 = THCharTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (long)lua_tonumber(L, 2)-1;
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [*LongTensor*] CharTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-if(arg2_idx)
-lua_pushvalue(L, arg2_idx);
-else
-luaT_pushudata(L, arg2, "torch.LongTensor");
-THCharTensor_mode(arg1,arg2,arg3,arg4,arg5);
-THLongTensor_add(arg2, arg2, 1);
-return 2;
-}
-
-static int torch_CharTensor_median(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-int arg2_idx = 0;
-THCharTensor *arg3 = NULL;
-long arg4 = 0;
-int arg5 = 1;
-if(narg == 1
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-arg2 = THLongTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg2_idx = 1;
-arg1 = THCharTensor_new();
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = THCharTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (long)lua_tonumber(L, 2)-1;
-arg1 = THCharTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [*LongTensor*] CharTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-if(arg2_idx)
-lua_pushvalue(L, arg2_idx);
-else
-luaT_pushudata(L, arg2, "torch.LongTensor");
-THCharTensor_median(arg1,arg2,arg3,arg4,arg5);
-THLongTensor_add(arg2, arg2, 1);
-return 2;
-}
-
-static int torch_CharTensor_tril(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THCharTensor *arg2 = NULL;
-int arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-arg1 = THCharTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (int)lua_tonumber(L, 2);
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (int)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [int]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_tril(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_CharTensor_triu(lua_State *L)
-{
-int narg = lua_gettop(L);
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THCharTensor *arg2 = NULL;
-int arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-)
-{
-arg1 = THCharTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (int)lua_tonumber(L, 2);
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (int)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [int]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.CharTensor");
-THCharTensor_triu(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_CharTensor_cat(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THCharTensor *arg1 = NULL;
-int arg1_idx = 0;
-THCharTensor *arg2 = NULL;
-THCharTensor *arg3 = NULL;
-long arg4 = -2;
-THCharTensor *arg5 = NULL;
-int arg5_idx = 0;
-THCharTensor **arg6_data = NULL;
-long arg6_size = 0;
-int arg6_i = 0;
-long arg7 = -2;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-)
-{
-argset = 1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THCharTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else if(narg == 1
-&& torch_isnonemptytable(L, 1)
-)
-{
-argset = 2;
-do
-{
- arg6_size++;
- lua_checkstack(L, 1);
- lua_rawgeti(L, 1, arg6_size);
-}
-while (!lua_isnil(L, -1));
-arg6_size--;
-lua_pop(L, 1);
-arg6_data = (THCharTensor**)THAlloc(arg6_size * sizeof(THCharTensor*));
-for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--)
-{
- if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.CharTensor")))
- luaL_error(L, "expected CharTensor in tensor array");
- lua_pop(L, 1);
-}
-
-arg5 = THCharTensor_new();
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor"))
-&& torch_isnonemptytable(L, 2)
-)
-{
-argset = 2;
-arg5_idx = 1;
-do
-{
- arg6_size++;
- lua_checkstack(L, 1);
- lua_rawgeti(L, 2, arg6_size);
-}
-while (!lua_isnil(L, -1));
-arg6_size--;
-lua_pop(L, 1);
-arg6_data = (THCharTensor**)THAlloc(arg6_size * sizeof(THCharTensor*));
-for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--)
-{
- if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.CharTensor")))
- luaL_error(L, "expected CharTensor in tensor array");
-
lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THCharTensor**)THAlloc(arg6_size * sizeof(THCharTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.CharTensor"))) - luaL_error(L, "expected CharTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THCharTensor**)THAlloc(arg6_size * sizeof(THCharTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.CharTensor"))) - luaL_error(L, "expected CharTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor [index] | [*CharTensor*] {CharTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.CharTensor"); -THCharTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static void THCharTensor_random2__(THCharTensor *self, THGenerator *gen, long a, long b) -{ - THArgCheck(b >= a, 2, "upper bound must be larger than lower bound"); - TH_TENSOR_APPLY(char, self, *self_data = ((THRandom_random(gen) % (b+1-a)) + a);) -} - -static void THCharTensor_random1__(THCharTensor *self, THGenerator *gen, long b) -{ - THArgCheck(b > 0, 1, "upper bound must be strictly positive"); - TH_TENSOR_APPLY(char, self, *self_data = (THRandom_random(gen) % b + 1);) -} - -static int torch_CharTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THCharTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THCharTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg 
== 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *CharTensor* [Generator] long long | *CharTensor* [Generator] long | *CharTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THCharTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THCharTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int torch_CharTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset 
= 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *CharTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_CharTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THCharTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, 
"torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *CharTensor* [Generator] [double] | *CharTensor* [Generator] FloatTensor | *CharTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THCharTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_CharTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -int arg3_idx = 0; -THCharTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor | [*CharTensor*] CharTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THCharTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.CharTensor"); -{int hasdims = arg4->nDimension > 1; -THCharTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THCharTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int torch_CharTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_sign(arg1,arg2); -return 1; -} - -static int torch_CharTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; 
-char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -char arg6 = 1; -char arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -char arg11 = 0; -char arg12 = 1; -THCharTensor *arg13 = NULL; -THCharTensor *arg14 = NULL; -char arg15 = 1; -char arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THCharTensor *arg19 = NULL; -int arg19_idx = 0; -char arg20 = 0; -char arg21 = 1; -THCharTensor *arg22 = NULL; -THCharTensor *arg23 = NULL; -char arg24 = 1; -char arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THCharTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THCharTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THCharTensor_new(); 
-arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THCharTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~2D CharTensor~2D [(V|F)] | [*CharTensor*] CharTensor~3D CharTensor~3D [(V|F)] | [*CharTensor*] CharTensor~3D CharTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.CharTensor"); -THCharTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.CharTensor"); -THCharTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_CharTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -char arg6 = 1; -char arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -char arg11 = 0; -char arg12 = 1; -THCharTensor *arg13 = NULL; -THCharTensor *arg14 = NULL; -char arg15 = 1; -char arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THCharTensor *arg19 = NULL; -int arg19_idx = 0; -char arg20 = 0; -char arg21 = 1; -THCharTensor *arg22 = NULL; -THCharTensor *arg23 = NULL; -char arg24 = 1; -char arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& 
(arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THCharTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THCharTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THCharTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THCharTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~2D CharTensor~2D [(V|F)] | [*CharTensor*] CharTensor~3D CharTensor~3D [(V|F)] | [*CharTensor*] CharTensor~3D CharTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) 
-{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.CharTensor"); -THCharTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.CharTensor"); -THCharTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_CharTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -char arg6 = 1; -char arg7 = 1; -char arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THCharTensor *arg11 = NULL; -int arg11_idx = 0; -char arg12 = 0; -char arg13 = 1; -THCharTensor *arg14 = NULL; -THCharTensor *arg15 = NULL; -char arg16 = 1; -char arg17 = 1; -char arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THCharTensor *arg21 = NULL; -int arg21_idx = 0; -char arg22 = 0; -char arg23 = 1; -THCharTensor *arg24 = NULL; -THCharTensor *arg25 = NULL; -char arg26 = 1; -char arg27 = 1; -char arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THCharTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) 
&& (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THCharTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THCharTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THCharTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~3D CharTensor~3D [(V|F)] | [*CharTensor*] CharTensor~4D CharTensor~4D [(V|F)] | [*CharTensor*] CharTensor~4D CharTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.CharTensor"); -THCharTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.CharTensor"); -THCharTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_CharTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -char arg6 = 1; -char arg7 = 1; -char arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THCharTensor *arg11 = NULL; -int arg11_idx = 0; -char arg12 = 0; -char arg13 = 1; -THCharTensor *arg14 = NULL; -THCharTensor *arg15 = NULL; -char arg16 = 1; -char arg17 = 1; -char arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char 
arg20_default = 'X'; -THCharTensor *arg21 = NULL; -int arg21_idx = 0; -char arg22 = 0; -char arg23 = 1; -THCharTensor *arg24 = NULL; -THCharTensor *arg25 = NULL; -char arg26 = 1; -char arg27 = 1; -char arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THCharTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THCharTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THCharTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.CharTensor")) && 
(arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THCharTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~3D CharTensor~3D [(V|F)] | [*CharTensor*] CharTensor~4D CharTensor~4D [(V|F)] | [*CharTensor*] CharTensor~4D CharTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.CharTensor"); -THCharTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.CharTensor"); -THCharTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_CharTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} 
-if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THCharTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_CharTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THCharTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_CharTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; 
-THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THCharTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_CharTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, 
"torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THCharTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_CharTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THCharTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_eqTensor(arg7,arg8,arg9); -return 1; -} 
-else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_CharTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THCharTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_CharTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THCharTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_CharTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); 
-THCharTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_zero(arg1); -return 1; -} - -static int m_torch_CharTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (char)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_fill(arg1,arg2); -return 1; -} - -static int m_torch_CharTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_CharTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_CharTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int m_torch_CharTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THCharTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THCharTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); 
-} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_CharTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -char arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* index LongTensor CharTensor | *CharTensor* index LongTensor char", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THCharTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THCharTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -THCharTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor CharTensor", type_buf); -} -arg3 = THCharTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int m_torch_CharTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -THCharTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor CharTensor", type_buf); -} -arg3 = THCharTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int m_torch_CharTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 1; -THCharTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, 
"torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char | *CharTensor* [CharTensor] [char] CharTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THCharTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 1; -THCharTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char | *CharTensor* [CharTensor] [char] CharTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THCharTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); 
-THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_div(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) 
-{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); 
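/* mod has no TH kernel of its own: the body below is the fmod body,
   ending in a call to THCharTensor_fmod, so tensor:mod() acts as an
   alias of tensor:fmod() (cmod similarly forwards to
   THCharTensor_cfmod further down). */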
-THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -char arg4 = 0; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg4 = (char)lua_tonumber(L, 3); -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -arg4 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] char char", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_CharTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -char arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* CharTensor CharTensor [char]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_CharTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& 
(arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char 
type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = 
NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] [char] CharTensor CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_CharTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; 
-str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor] [char] CharTensor CharTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_CharTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* CharTensor~2D CharTensor~1D", type_buf); -} -THCharTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THCharTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_CharTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* CharTensor~2D CharTensor~2D", type_buf); -} -THCharTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THCharTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_CharTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* CharTensor~3D CharTensor~3D", type_buf); -} -THCharTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THCharTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_CharTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 1; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* CharTensor~1D CharTensor~1D", type_buf); -} -THCharTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THCharTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_CharTensor_addmv(lua_State *L) -{ -int narg = 
lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 1; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -THCharTensor *arg7 = NULL; -int arg7_idx = 0; -char arg8 = 0; -THCharTensor *arg9 = NULL; -char arg10 = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (char)lua_tonumber(L, 2); -arg10 = (char)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg9->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.CharTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (char)lua_tonumber(L, 2); -arg10 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor~1D* [CharTensor~1D] [char] CharTensor~2D CharTensor~1D | *CharTensor~1D* char [CharTensor~1D] char CharTensor~2D CharTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THCharTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THCharTensor_addmv(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_addmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 1; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -THCharTensor 
*arg7 = NULL; -int arg7_idx = 0; -char arg8 = 0; -THCharTensor *arg9 = NULL; -char arg10 = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (char)lua_tonumber(L, 2); -arg10 = (char)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.CharTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (char)lua_tonumber(L, 2); -arg10 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor~2D* [CharTensor~2D] [char] CharTensor~2D CharTensor~2D | *CharTensor~2D* char [CharTensor~2D] char CharTensor~2D CharTensor~2D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THCharTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THCharTensor_addmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 1; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -THCharTensor *arg7 = NULL; -int arg7_idx = 0; -char arg8 = 0; -THCharTensor *arg9 = NULL; -char arg10 = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, 
"torch.CharTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (char)lua_tonumber(L, 2); -arg10 = (char)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 6, "torch.CharTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (char)lua_tonumber(L, 2); -arg10 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor~2D* [CharTensor~2D] [char] CharTensor~1D CharTensor~1D | *CharTensor~2D* char [CharTensor~2D] char CharTensor~1D CharTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THCharTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THCharTensor_addr(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_addbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 1; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -THCharTensor *arg7 = NULL; -int arg7_idx = 0; -char arg8 = 0; -THCharTensor *arg9 = NULL; -char arg10 = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 3) -) -{ 
-argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (char)lua_tonumber(L, 2); -arg10 = (char)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.CharTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (char)lua_tonumber(L, 2); -arg10 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor~2D* [CharTensor~2D] [char] CharTensor~3D CharTensor~3D | *CharTensor~2D* char [CharTensor~2D] char CharTensor~3D CharTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THCharTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THCharTensor_addbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_baddbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 1; -THCharTensor *arg3 = NULL; -char arg4 = 1; -THCharTensor *arg5 = NULL; -THCharTensor *arg6 = NULL; -THCharTensor *arg7 = NULL; -int arg7_idx = 0; -char arg8 = 0; -THCharTensor *arg9 = NULL; -char arg10 = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && 
(arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (char)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.CharTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (char)lua_tonumber(L, 2); -arg10 = (char)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg9->nDimension == 3) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.CharTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.CharTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (char)lua_tonumber(L, 2); -arg10 = (char)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor~3D* [CharTensor~3D] [char] CharTensor~3D CharTensor~3D | *CharTensor~3D* char [CharTensor~3D] char CharTensor~3D CharTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THCharTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THCharTensor_baddbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_numel(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -ptrdiff_t arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor", type_buf); -} -arg2 = THCharTensor_numel(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_CharTensor_cumsum(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, 
"torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cumsum(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_cumprod(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cumprod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_sum(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -long arg2 = 0; -THCharTensor *arg3 = NULL; -int arg3_idx = 0; -THCharTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor | [*CharTensor*] CharTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THCharTensor_sumall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.CharTensor"); -THCharTensor_sum(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_prod(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -long arg2 = 0; -THCharTensor *arg3 = NULL; -int arg3_idx = 0; -THCharTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ 
-char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor | [*CharTensor*] CharTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THCharTensor_prodall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.CharTensor"); -THCharTensor_prod(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_min(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -char arg2 = 0; -THCharTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THCharTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor | [*CharTensor*] [*LongTensor*] CharTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THCharTensor_minall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.CharTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THCharTensor_min(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_CharTensor_max(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -char arg2 = 0; -THCharTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THCharTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THCharTensor_new(); 
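/* Dimension-wise min/max return a pair (values, indices); whichever
   output tensor the caller did not supply is allocated here. Note the
   epilogue below: after the TH kernel fills the LongTensor with
   0-based positions, THLongTensor_add(arg4, arg4, 1) shifts them back
   to Lua's 1-based indexing before both results are returned. */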
-} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor | [*CharTensor*] [*LongTensor*] CharTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THCharTensor_maxall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.CharTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THCharTensor_max(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_CharTensor_cmin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (char)lua_tonumber(L, 1); -arg4 = THCharTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (char)lua_tonumber(L, 2); -arg4 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [CharTensor] CharTensor | [*CharTensor*] [CharTensor] char", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cmin(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.CharTensor"); -THCharTensor_cminValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_cmax(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 
1; -arg1 = THCharTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (char)lua_tonumber(L, 1); -arg4 = THCharTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (char)lua_tonumber(L, 2); -arg4 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [CharTensor] CharTensor | [*CharTensor*] [CharTensor] char", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cmax(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.CharTensor"); -THCharTensor_cmaxValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_trace(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -long arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: CharTensor", type_buf); -} -arg2 = THCharTensor_trace(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_CharTensor_cross(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -long arg4 = -1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor [index]", type_buf); -} 
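/* The epilogue just below is uniform across all of these generated
 * bindings: when the caller supplied a destination tensor, its original
 * stack slot (arg1_idx) is re-pushed so the result is returned in place;
 * otherwise the THCharTensor_new() result allocated during dispatch is
 * pushed as a fresh userdatum via luaT_pushudata(). */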
-if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cross(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_CharTensor_diag(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_diag(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_eye(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_eye(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_range(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -long arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* long long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_range(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_CharTensor_randperm(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} 
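/* m_torch_CharTensor_randperm takes an optional torch.Generator; in the
 * two-argument overload above, where it is omitted, the shared default
 * generator is fetched from the global torch table's "_gen" field via
 * luaT_getfieldcheckudata(). After filling the tensor, the binding shifts
 * every entry by one (THCharTensor_add(arg1, arg1, 1) below) so the
 * permutation is 1-based on the Lua side. */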
-else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [Generator] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_randperm(arg1,arg2,arg3); - -THCharTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_CharTensor_sort(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THCharTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg2_idx = 1; -arg1 = THCharTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isboolean(L, 2) -) -{ -arg5 = lua_toboolean(L, 2); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg5 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg5 = lua_toboolean(L, 3); -arg1 = THCharTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = lua_toboolean(L, 4); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; 
-arg5 = lua_toboolean(L, 3); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [*LongTensor*] CharTensor [index] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THCharTensor_sort(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_CharTensor_topk(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THCharTensor *arg3 = NULL; -long arg4 = 1; -long arg5 = 0; -int arg6 = 0; -int arg7 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg2_idx = 1; -arg1 = THCharTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THCharTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& 
lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THCharTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); 
-arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THCharTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg1 = THCharTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; 
-arg6 = lua_toboolean(L, 6); -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isboolean(L, 2) -) -{ -arg7 = lua_toboolean(L, 2); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg7 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg7 = lua_toboolean(L, 3); -arg1 = THCharTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg7 = lua_toboolean(L, 4); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THCharTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg7 = lua_toboolean(L, 3); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 
5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THCharTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THCharTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& 
lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THCharTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THCharTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -} -else if(narg == 5 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg1 = THCharTensor_new(); -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -&& lua_isboolean(L, 7) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = 
lua_toboolean(L, 6); -arg7 = lua_toboolean(L, 7); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [*LongTensor*] CharTensor [long] [index] [boolean] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THCharTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_CharTensor_kthvalue(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THCharTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THCharTensor_new(); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THCharTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [*LongTensor*] CharTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THCharTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} 
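All of the m_torch_CharTensor_* functions removed in this hunk follow one machine-generated template (they appear to come from torch's cwrap generator): count the arguments with lua_gettop(), probe each stack slot against every accepted overload, record the matching overload in argset, translate Lua's 1-based index arguments to C's 0-based convention with a "- 1" on the way in, and shift returned index tensors back with THLongTensor_add(..., 1) on the way out, exactly as kthvalue does just above. The standalone sketch below is a hypothetical miniature of that pattern over a plain Lua array; the argmax name and its overloads are invented for illustration, and only the plain Lua 5.1/LuaJIT C API is used:

#include <lua.h>
#include <lauxlib.h>

/* argmax(t) or argmax(t, start): dispatch on argument count and types,
 * mirroring the generated bindings' narg/overload scheme in miniature. */
static int l_argmax(lua_State *L)
{
    int narg = lua_gettop(L);
    long start = 0;                         /* 0-based start offset */

    if (narg == 1 && lua_istable(L, 1)) {
        /* overload 1: argmax(t) */
    }
    else if (narg == 2 && lua_istable(L, 1) && lua_isnumber(L, 2)) {
        /* overload 2: argmax(t, start) -- 1-based in Lua, 0-based in C,
         * the same "- 1" adjustment seen throughout the patch */
        start = (long)lua_tonumber(L, 2) - 1;
    }
    else {
        return luaL_error(L, "invalid arguments: expected table | table index");
    }

    long n = (long)lua_objlen(L, 1);        /* Lua 5.1/LuaJIT length op */
    if (start < 0 || start >= n)
        return luaL_error(L, "start index out of range");

    long best = start;
    lua_rawgeti(L, 1, best + 1);
    lua_Number best_v = lua_tonumber(L, -1);
    lua_pop(L, 1);

    for (long i = start + 1; i < n; i++) {
        lua_rawgeti(L, 1, i + 1);
        lua_Number v = lua_tonumber(L, -1);
        lua_pop(L, 1);
        if (v > best_v) { best_v = v; best = i; }
    }

    /* convert the 0-based C index back to 1-based for Lua, the same shift
     * THLongTensor_add(arg2, arg2, 1) applies to returned index tensors */
    lua_pushnumber(L, (lua_Number)(best + 1));
    return 1;
}

Registered with lua_register(L, "argmax", l_argmax), this gives argmax({3, 9, 4}) --> 2 and argmax({3, 9, 4}, 3) --> 3, with the two "+ 1" / "- 1" conversions keeping both sides in their native indexing convention.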
- -static int m_torch_CharTensor_mode(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THCharTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg2_idx = 1; -arg1 = THCharTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [*LongTensor*] CharTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THCharTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_CharTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THCharTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg2_idx = 1; -arg1 = THCharTensor_new(); -arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; 
-arg4 = THCharTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THCharTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] [*LongTensor*] CharTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THCharTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_CharTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_tril(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_triu(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THCharTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, 
arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_triu(arg1,arg2,arg3); -return 1; -} - -static int m_torch_CharTensor_cat(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -long arg4 = -2; -THCharTensor *arg5 = NULL; -int arg5_idx = 0; -THCharTensor **arg6_data = NULL; -long arg6_size = 0; -int arg6_i = 0; -long arg7 = -2; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THCharTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.CharTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 1 -&& torch_isnonemptytable(L, 1) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THCharTensor**)THAlloc(arg6_size * sizeof(THCharTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.CharTensor"))) - luaL_error(L, "expected CharTensor in tensor array"); - lua_pop(L, 1); -} - -arg5 = THCharTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THCharTensor**)THAlloc(arg6_size * sizeof(THCharTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.CharTensor"))) - luaL_error(L, "expected CharTensor in tensor array"); - lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THCharTensor**)THAlloc(arg6_size * sizeof(THCharTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.CharTensor"))) - luaL_error(L, "expected CharTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THCharTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.CharTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THCharTensor**)THAlloc(arg6_size * sizeof(THCharTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.CharTensor"))) - luaL_error(L, "expected CharTensor in tensor array"); - 
lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor CharTensor [index] | [*CharTensor*] {CharTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.CharTensor"); -THCharTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THCharTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THCharTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = 
luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *CharTensor* [Generator] long long | *CharTensor* [Generator] long | *CharTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THCharTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THCharTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *CharTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THCharTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); 
-lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 2; -arg4_idx = 1; -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *CharTensor* [Generator] [double] | *CharTensor* [Generator] FloatTensor | *CharTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THCharTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -THCharTensor *arg3 = NULL; -int arg3_idx = 0; -THCharTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = 
THCharTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor | [*CharTensor*] CharTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THCharTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.CharTensor"); -{int hasdims = arg4->nDimension > 1; -THCharTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THCharTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int m_torch_CharTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *CharTensor* [CharTensor]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THCharTensor_sign(arg1,arg2); -return 1; -} - -static int m_torch_CharTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -char arg6 = 1; -char arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -char arg11 = 0; -char arg12 = 1; -THCharTensor *arg13 = NULL; -THCharTensor *arg14 = NULL; -char arg15 = 1; -char arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THCharTensor *arg19 = NULL; -int arg19_idx = 0; -char arg20 = 0; -char arg21 = 1; -THCharTensor *arg22 = NULL; -THCharTensor *arg23 = NULL; -char arg24 = 1; -char arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg9 = &arg9_default; -} -else 
if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THCharTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THCharTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THCharTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THCharTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~2D CharTensor~2D [(V|F)] | [*CharTensor*] CharTensor~3D CharTensor~3D [(V|F)] | [*CharTensor*] CharTensor~3D CharTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, 
"torch.CharTensor"); -THCharTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.CharTensor"); -THCharTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -char arg6 = 1; -char arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -char arg11 = 0; -char arg12 = 1; -THCharTensor *arg13 = NULL; -THCharTensor *arg14 = NULL; -char arg15 = 1; -char arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THCharTensor *arg19 = NULL; -int arg19_idx = 0; -char arg20 = 0; -char arg21 = 1; -THCharTensor *arg22 = NULL; -THCharTensor *arg23 = NULL; -char arg24 = 1; -char arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THCharTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THCharTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg13 = 
luaT_toudata(L, 2, "torch.CharTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THCharTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THCharTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~2D CharTensor~2D [(V|F)] | [*CharTensor*] CharTensor~3D CharTensor~3D [(V|F)] | [*CharTensor*] CharTensor~3D CharTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.CharTensor"); -THCharTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.CharTensor"); -THCharTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -char arg6 = 1; -char arg7 = 1; -char arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THCharTensor *arg11 = NULL; -int arg11_idx = 0; -char arg12 = 0; -char arg13 = 1; -THCharTensor *arg14 = NULL; -THCharTensor *arg15 = NULL; -char arg16 = 1; -char arg17 = 1; -char arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THCharTensor *arg21 = NULL; -int arg21_idx = 0; -char arg22 = 0; -char arg23 = 1; -THCharTensor *arg24 = NULL; -THCharTensor *arg25 = NULL; -char arg26 = 1; -char arg27 = 1; -char arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, 
"torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THCharTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THCharTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THCharTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THCharTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg24 = luaT_toudata(L, 2, 
"torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~3D CharTensor~3D [(V|F)] | [*CharTensor*] CharTensor~4D CharTensor~4D [(V|F)] | [*CharTensor*] CharTensor~4D CharTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.CharTensor"); -THCharTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.CharTensor"); -THCharTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THCharTensor *arg1 = NULL; -int arg1_idx = 0; -char arg2 = 0; -char arg3 = 1; -THCharTensor *arg4 = NULL; -THCharTensor *arg5 = NULL; -char arg6 = 1; -char arg7 = 1; -char arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THCharTensor *arg11 = NULL; -int arg11_idx = 0; -char arg12 = 0; -char arg13 = 1; -THCharTensor *arg14 = NULL; -THCharTensor *arg15 = NULL; -char arg16 = 1; -char arg17 = 1; -char arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THCharTensor *arg21 = NULL; -int arg21_idx = 0; -char arg22 = 0; -char arg23 = 1; -THCharTensor *arg24 = NULL; -THCharTensor *arg25 = NULL; -char arg26 = 1; -char arg27 = 1; -char arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THCharTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) && 
(arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THCharTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THCharTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THCharTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THCharTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.CharTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.CharTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*CharTensor*] CharTensor~3D CharTensor~3D [(V|F)] | [*CharTensor*] CharTensor~4D CharTensor~4D [(V|F)] | [*CharTensor*] CharTensor~4D CharTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.CharTensor"); -THCharTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.CharTensor"); -THCharTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.CharTensor"); -THCharTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_lt(lua_State *L) -{ -int narg = 
lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THCharTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = 
(char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THCharTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, 
"torch.ByteTensor"); -THCharTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THCharTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = 
luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THCharTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -char arg3 = 0; -THCharTensor *arg4 = NULL; -int arg4_idx = 0; -THCharTensor *arg5 = NULL; -char arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THCharTensor *arg8 = NULL; -THCharTensor *arg9 = NULL; -THCharTensor *arg10 = NULL; -int arg10_idx = 0; -THCharTensor *arg11 = NULL; -THCharTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (char)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (char)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.CharTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (char)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = 
luaT_toudata(L, 1, "torch.CharTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.CharTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.CharTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] CharTensor char | *CharTensor* CharTensor char | [*ByteTensor*] CharTensor CharTensor | *CharTensor* CharTensor CharTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THCharTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THCharTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THCharTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THCharTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_CharTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THCharTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.CharTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.CharTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] CharTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THCharTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static const struct luaL_Reg m_torch_CharTensorMath__ [] = { -{"zero", m_torch_CharTensor_zero}, -{"fill", m_torch_CharTensor_fill}, -{"zeros", m_torch_CharTensor_zeros}, -{"ones", m_torch_CharTensor_ones}, -{"reshape", m_torch_CharTensor_reshape}, -{"gather", m_torch_CharTensor_gather}, -{"scatter", m_torch_CharTensor_scatter}, -{"dot", m_torch_CharTensor_dot}, -{"equal", m_torch_CharTensor_equal}, -{"add", m_torch_CharTensor_add}, -{"csub", m_torch_CharTensor_csub}, -{"mul", m_torch_CharTensor_mul}, -{"div", m_torch_CharTensor_div}, -{"lshift", m_torch_CharTensor_lshift}, -{"rshift", m_torch_CharTensor_rshift}, -{"fmod", m_torch_CharTensor_fmod}, -{"remainder", m_torch_CharTensor_remainder}, -{"bitand", m_torch_CharTensor_bitand}, -{"bitor", m_torch_CharTensor_bitor}, -{"bitxor", m_torch_CharTensor_bitxor}, -{"mod", m_torch_CharTensor_mod}, -{"clamp", m_torch_CharTensor_clamp}, -{"match", m_torch_CharTensor_match}, -{"cmul", m_torch_CharTensor_cmul}, -{"cpow", m_torch_CharTensor_cpow}, -{"cdiv", m_torch_CharTensor_cdiv}, -{"clshift", m_torch_CharTensor_clshift}, -{"crshift", m_torch_CharTensor_crshift}, -{"cfmod", m_torch_CharTensor_cfmod}, -{"cremainder", m_torch_CharTensor_cremainder}, -{"cbitand", m_torch_CharTensor_cbitand}, -{"cbitor", m_torch_CharTensor_cbitor}, -{"cbitxor", m_torch_CharTensor_cbitxor}, -{"cmod", m_torch_CharTensor_cmod}, -{"addcmul", m_torch_CharTensor_addcmul}, -{"addcdiv", m_torch_CharTensor_addcdiv}, -{"mv", m_torch_CharTensor_mv}, -{"mm", m_torch_CharTensor_mm}, -{"bmm", m_torch_CharTensor_bmm}, -{"ger", m_torch_CharTensor_ger}, -{"addmv", m_torch_CharTensor_addmv}, -{"addmm", m_torch_CharTensor_addmm}, -{"addr", m_torch_CharTensor_addr}, -{"addbmm", 
m_torch_CharTensor_addbmm}, -{"baddbmm", m_torch_CharTensor_baddbmm}, -{"numel", m_torch_CharTensor_numel}, -{"cumsum", m_torch_CharTensor_cumsum}, -{"cumprod", m_torch_CharTensor_cumprod}, -{"sum", m_torch_CharTensor_sum}, -{"prod", m_torch_CharTensor_prod}, -{"min", m_torch_CharTensor_min}, -{"max", m_torch_CharTensor_max}, -{"cmin", m_torch_CharTensor_cmin}, -{"cmax", m_torch_CharTensor_cmax}, -{"trace", m_torch_CharTensor_trace}, -{"cross", m_torch_CharTensor_cross}, -{"diag", m_torch_CharTensor_diag}, -{"eye", m_torch_CharTensor_eye}, -{"range", m_torch_CharTensor_range}, -{"randperm", m_torch_CharTensor_randperm}, -{"sort", m_torch_CharTensor_sort}, -{"topk", m_torch_CharTensor_topk}, -{"kthvalue", m_torch_CharTensor_kthvalue}, -{"mode", m_torch_CharTensor_mode}, -{"median", m_torch_CharTensor_median}, -{"tril", m_torch_CharTensor_tril}, -{"triu", m_torch_CharTensor_triu}, -{"cat", m_torch_CharTensor_cat}, -{"random", m_torch_CharTensor_random}, -{"geometric", m_torch_CharTensor_geometric}, -{"bernoulli", m_torch_CharTensor_bernoulli}, -{"squeeze", m_torch_CharTensor_squeeze}, -{"sign", m_torch_CharTensor_sign}, -{"conv2", m_torch_CharTensor_conv2}, -{"xcorr2", m_torch_CharTensor_xcorr2}, -{"conv3", m_torch_CharTensor_conv3}, -{"xcorr3", m_torch_CharTensor_xcorr3}, -{"lt", m_torch_CharTensor_lt}, -{"gt", m_torch_CharTensor_gt}, -{"le", m_torch_CharTensor_le}, -{"ge", m_torch_CharTensor_ge}, -{"eq", m_torch_CharTensor_eq}, -{"ne", m_torch_CharTensor_ne}, -{"nonzero", m_torch_CharTensor_nonzero}, -{NULL, NULL} -}; - -static const struct luaL_Reg torch_CharTensorMath__ [] = { -{"zero", torch_CharTensor_zero}, -{"fill", torch_CharTensor_fill}, -{"zeros", torch_CharTensor_zeros}, -{"ones", torch_CharTensor_ones}, -{"reshape", torch_CharTensor_reshape}, -{"gather", torch_CharTensor_gather}, -{"scatter", torch_CharTensor_scatter}, -{"dot", torch_CharTensor_dot}, -{"equal", torch_CharTensor_equal}, -{"add", torch_CharTensor_add}, -{"csub", torch_CharTensor_csub}, -{"mul", torch_CharTensor_mul}, -{"div", torch_CharTensor_div}, -{"lshift", torch_CharTensor_lshift}, -{"rshift", torch_CharTensor_rshift}, -{"fmod", torch_CharTensor_fmod}, -{"remainder", torch_CharTensor_remainder}, -{"bitand", torch_CharTensor_bitand}, -{"bitor", torch_CharTensor_bitor}, -{"bitxor", torch_CharTensor_bitxor}, -{"mod", torch_CharTensor_mod}, -{"clamp", torch_CharTensor_clamp}, -{"match", torch_CharTensor_match}, -{"cmul", torch_CharTensor_cmul}, -{"cpow", torch_CharTensor_cpow}, -{"cdiv", torch_CharTensor_cdiv}, -{"clshift", torch_CharTensor_clshift}, -{"crshift", torch_CharTensor_crshift}, -{"cfmod", torch_CharTensor_cfmod}, -{"cremainder", torch_CharTensor_cremainder}, -{"cbitand", torch_CharTensor_cbitand}, -{"cbitor", torch_CharTensor_cbitor}, -{"cbitxor", torch_CharTensor_cbitxor}, -{"cmod", torch_CharTensor_cmod}, -{"addcmul", torch_CharTensor_addcmul}, -{"addcdiv", torch_CharTensor_addcdiv}, -{"mv", torch_CharTensor_mv}, -{"mm", torch_CharTensor_mm}, -{"bmm", torch_CharTensor_bmm}, -{"ger", torch_CharTensor_ger}, -{"addmv", torch_CharTensor_addmv}, -{"addmm", torch_CharTensor_addmm}, -{"addr", torch_CharTensor_addr}, -{"addbmm", torch_CharTensor_addbmm}, -{"baddbmm", torch_CharTensor_baddbmm}, -{"numel", torch_CharTensor_numel}, -{"cumsum", torch_CharTensor_cumsum}, -{"cumprod", torch_CharTensor_cumprod}, -{"sum", torch_CharTensor_sum}, -{"prod", torch_CharTensor_prod}, -{"min", torch_CharTensor_min}, -{"max", torch_CharTensor_max}, -{"cmin", torch_CharTensor_cmin}, -{"cmax", torch_CharTensor_cmax}, 
-{"trace", torch_CharTensor_trace}, -{"cross", torch_CharTensor_cross}, -{"diag", torch_CharTensor_diag}, -{"eye", torch_CharTensor_eye}, -{"range", torch_CharTensor_range}, -{"randperm", torch_CharTensor_randperm}, -{"sort", torch_CharTensor_sort}, -{"topk", torch_CharTensor_topk}, -{"kthvalue", torch_CharTensor_kthvalue}, -{"mode", torch_CharTensor_mode}, -{"median", torch_CharTensor_median}, -{"tril", torch_CharTensor_tril}, -{"triu", torch_CharTensor_triu}, -{"cat", torch_CharTensor_cat}, -{"random", torch_CharTensor_random}, -{"geometric", torch_CharTensor_geometric}, -{"bernoulli", torch_CharTensor_bernoulli}, -{"squeeze", torch_CharTensor_squeeze}, -{"sign", torch_CharTensor_sign}, -{"conv2", torch_CharTensor_conv2}, -{"xcorr2", torch_CharTensor_xcorr2}, -{"conv3", torch_CharTensor_conv3}, -{"xcorr3", torch_CharTensor_xcorr3}, -{"lt", torch_CharTensor_lt}, -{"gt", torch_CharTensor_gt}, -{"le", torch_CharTensor_le}, -{"ge", torch_CharTensor_ge}, -{"eq", torch_CharTensor_eq}, -{"ne", torch_CharTensor_ne}, -{"nonzero", torch_CharTensor_nonzero}, -{NULL, NULL} -}; - -static void torch_CharTensorMath_init(lua_State *L) -{ - luaT_pushmetatable(L, "torch.CharTensor"); - - /* register methods */ - luaT_setfuncs(L, m_torch_CharTensorMath__, 0); - - /* register functions into the "torch" field of the tensor metaclass */ - lua_pushstring(L, "torch"); - lua_newtable(L); - luaT_setfuncs(L, torch_CharTensorMath__, 0); - lua_rawset(L, -3); - lua_pop(L, 1); -} - -static int torch_ShortTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_zero(arg1); -return 1; -} - -static int torch_ShortTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_fill(arg1,arg2); -return 1; -} - -static int torch_ShortTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THShortTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_ShortTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THShortTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 
1, "torch.ShortTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_ShortTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int torch_ShortTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THShortTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_ShortTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -short arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected 
arguments: *ShortTensor* index LongTensor ShortTensor | *ShortTensor* index LongTensor short", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THShortTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THShortTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_ShortTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -THShortTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor ShortTensor", type_buf); -} -arg3 = THShortTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int torch_ShortTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -THShortTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor ShortTensor", type_buf); -} -arg3 = THShortTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int torch_ShortTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 1; -THShortTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 2; -arg4 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 2; -arg6 = (short)lua_tonumber(L, 2); -arg4 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short | [*ShortTensor*] ShortTensor [short] ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ShortTensor"); 
-THShortTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_ShortTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 1; -THShortTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 2; -arg4 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 2; -arg6 = (short)lua_tonumber(L, 2); -arg4 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short | [*ShortTensor*] ShortTensor [short] ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ShortTensor"); -THShortTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_ShortTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = 
luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_div(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 
1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; 
-str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -short arg4 = 0; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg4 = (short)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -arg4 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short short", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_ShortTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -short arg4 = 1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (short)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor [short]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_ShortTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cmul(arg1,arg2,arg3); -return 
1; -} - -static int torch_ShortTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int 
torch_ShortTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_cbitxor(lua_State 
*L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 1; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [short] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_ShortTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 1; -THShortTensor *arg4 = 
NULL; -THShortTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -arg3 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [short] ShortTensor ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_ShortTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THShortTensor_new(); -THShortTensor_resize1d(arg1, arg5->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor~2D ShortTensor~1D", type_buf); -} -THShortTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_ShortTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THShortTensor_new(); -THShortTensor_resize2d(arg1, arg5->size[0], arg6->size[1]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
[*ShortTensor*] ShortTensor~2D ShortTensor~2D", type_buf); -} -THShortTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_ShortTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THShortTensor_new(); -THShortTensor_resize3d(arg1, arg5->size[0], arg5->size[1], arg6->size[2]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor~3D ShortTensor~3D", type_buf); -} -THShortTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_ShortTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THShortTensor_new(); -THShortTensor_resize2d(arg1, arg5->size[0], arg6->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor~1D ShortTensor~1D", type_buf); -} -THShortTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_ShortTensor_addmv(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& 
(arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (short)lua_tonumber(L, 1); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg4 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (short)lua_tonumber(L, 1); -arg4 = (short)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 6, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -arg4 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [short] ShortTensor~1D [short] ShortTensor~2D ShortTensor~1D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_ShortTensor_addmm(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 2) -) 
-{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -arg2 = (short)lua_tonumber(L, 1); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -arg4 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -arg2 = (short)lua_tonumber(L, 1); -arg4 = (short)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 6, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -arg4 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [short] ShortTensor~2D [short] ShortTensor~2D ShortTensor~2D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_ShortTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = 
luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (short)lua_tonumber(L, 1); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg4 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (short)lua_tonumber(L, 1); -arg4 = (short)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 6, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -arg4 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [short] ShortTensor~2D [short] ShortTensor~1D ShortTensor~1D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_ShortTensor_addbmm(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = 
luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (short)lua_tonumber(L, 1); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg4 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (short)lua_tonumber(L, 1); -arg4 = (short)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 6, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -arg4 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [short] ShortTensor~2D [short] ShortTensor~3D ShortTensor~3D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_ShortTensor_baddbmm(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = 
luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (short)lua_tonumber(L, 1); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg4 = (short)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (short)lua_tonumber(L, 1); -arg4 = (short)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 6, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -arg4 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [short] ShortTensor~3D [short] ShortTensor~3D ShortTensor~3D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_ShortTensor_numel(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -ptrdiff_t arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor", type_buf); -} -arg2 = THShortTensor_numel(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int torch_ShortTensor_cumsum(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; 
-int arg1_idx = 0; -THShortTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cumsum(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_cumprod(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cumprod(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_sum(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -long arg2 = 0; -THShortTensor *arg3 = NULL; -int arg3_idx = 0; -THShortTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor | [*ShortTensor*] ShortTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THShortTensor_sumall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ShortTensor"); -THShortTensor_sum(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_ShortTensor_prod(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -long arg2 = 0; -THShortTensor *arg3 = NULL; 
-int arg3_idx = 0; -THShortTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor | [*ShortTensor*] ShortTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THShortTensor_prodall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ShortTensor"); -THShortTensor_prod(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_ShortTensor_min(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -short arg2 = 0; -THShortTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THShortTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor | [*ShortTensor*] [*LongTensor*] ShortTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THShortTensor_minall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ShortTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THShortTensor_min(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int torch_ShortTensor_max(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -short arg2 = 0; -THShortTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 
2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THShortTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor | [*ShortTensor*] [*LongTensor*] ShortTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THShortTensor_maxall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ShortTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THShortTensor_max(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int torch_ShortTensor_cmin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (short)lua_tonumber(L, 2); -arg4 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor | [*ShortTensor*] ShortTensor short", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cmin(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ShortTensor"); -THShortTensor_cminValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_ShortTensor_cmax(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, 
"torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (short)lua_tonumber(L, 2); -arg4 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor | [*ShortTensor*] ShortTensor short", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cmax(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ShortTensor"); -THShortTensor_cmaxValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_ShortTensor_trace(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -long arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor", type_buf); -} -arg2 = THShortTensor_trace(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int torch_ShortTensor_cross(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -long arg4 = -1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cross(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_ShortTensor_diag(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ 
-arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_diag(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_eye(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] long [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_eye(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_range(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -long arg4 = 1; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] long long [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_range(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_ShortTensor_randperm(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -arg3 = (long)lua_tonumber(L, 1); -arg1 = THShortTensor_new(); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, 
"_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [Generator] long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_randperm(arg1,arg2,arg3); - -THShortTensor_add(arg1, arg1, 1); -return 1; -} - -static int torch_ShortTensor_sort(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THShortTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg2_idx = 1; -arg1 = THShortTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isboolean(L, 2) -) -{ -arg5 = lua_toboolean(L, 2); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg5 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg4 = 
THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg5 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = lua_toboolean(L, 4); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg5 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [*LongTensor*] ShortTensor [index] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THShortTensor_sort(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_ShortTensor_topk(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THShortTensor *arg3 = NULL; -long arg4 = 1; -long arg5 = 0; -int arg6 = 0; -int arg7 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg2_idx = 1; -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = 
THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = 
THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& 
lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg1 = THShortTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isboolean(L, 2) -) -{ -arg7 = lua_toboolean(L, 2); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg7 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg7 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg7 = lua_toboolean(L, 4); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg7 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 
= THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THShortTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = 
lua_toboolean(L, 5); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THShortTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -} -else if(narg == 5 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ 
-arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg1 = THShortTensor_new(); -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -&& lua_isboolean(L, 7) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -arg7 = lua_toboolean(L, 7); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [*LongTensor*] ShortTensor [long] [index] [boolean] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THShortTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_ShortTensor_kthvalue(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THShortTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& 
lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [*LongTensor*] ShortTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THShortTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_ShortTensor_mode(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THShortTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg2_idx = 1; -arg1 = THShortTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [*LongTensor*] ShortTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THShortTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int 
torch_ShortTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THShortTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg2_idx = 1; -arg1 = THShortTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [*LongTensor*] ShortTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THShortTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_ShortTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else 
-luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_tril(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_triu(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_triu(arg1,arg2,arg3); -return 1; -} - -static int torch_ShortTensor_cat(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -long arg4 = -2; -THShortTensor *arg5 = NULL; -int arg5_idx = 0; -THShortTensor **arg6_data = NULL; -long arg6_size = 0; -int arg6_i = 0; -long arg7 = -2; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 1 -&& torch_isnonemptytable(L, 1) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THShortTensor**)THAlloc(arg6_size * sizeof(THShortTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ShortTensor"))) - luaL_error(L, "expected ShortTensor in tensor array"); - lua_pop(L, 1); -} - -arg5 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THShortTensor**)THAlloc(arg6_size * sizeof(THShortTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ShortTensor"))) - luaL_error(L, "expected ShortTensor in tensor array"); - lua_pop(L, 1); -} - -} 
-else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THShortTensor**)THAlloc(arg6_size * sizeof(THShortTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ShortTensor"))) - luaL_error(L, "expected ShortTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THShortTensor**)THAlloc(arg6_size * sizeof(THShortTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ShortTensor"))) - luaL_error(L, "expected ShortTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor [index] | [*ShortTensor*] {ShortTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.ShortTensor"); -THShortTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static void THShortTensor_random2__(THShortTensor *self, THGenerator *gen, long a, long b) -{ - THArgCheck(b >= a, 2, "upper bound must be larger than lower bound"); - TH_TENSOR_APPLY(short, self, *self_data = ((THRandom_random(gen) % (b+1-a)) + a);) -} - -static void THShortTensor_random1__(THShortTensor *self, THGenerator *gen, long b) -{ - THArgCheck(b > 0, 1, "upper bound must be strictly positive"); - TH_TENSOR_APPLY(short, self, *self_data = (THRandom_random(gen) % b + 1);) -} - -static int torch_ShortTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THShortTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THShortTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else 
if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *ShortTensor* [Generator] long long | *ShortTensor* [Generator] long | *ShortTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THShortTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THShortTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int torch_ShortTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& 
lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *ShortTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_ShortTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THShortTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 
-&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *ShortTensor* [Generator] [double] | *ShortTensor* [Generator] FloatTensor | *ShortTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THShortTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_ShortTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -int arg3_idx = 0; -THShortTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor | [*ShortTensor*] ShortTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THShortTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ShortTensor"); -{int hasdims = arg4->nDimension > 1; -THShortTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THShortTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int torch_ShortTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_sign(arg1,arg2); -return 1; -} - -static int torch_ShortTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int 
argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -short arg3 = 1; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -short arg6 = 1; -short arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -short arg11 = 0; -short arg12 = 1; -THShortTensor *arg13 = NULL; -THShortTensor *arg14 = NULL; -short arg15 = 1; -short arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THShortTensor *arg19 = NULL; -int arg19_idx = 0; -short arg20 = 0; -short arg21 = 1; -THShortTensor *arg22 = NULL; -THShortTensor *arg23 = NULL; -short arg24 = 1; -short arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THShortTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THShortTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = 
luaT_toudata(L, 2, "torch.ShortTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THShortTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THShortTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor~2D ShortTensor~2D [(V|F)] | [*ShortTensor*] ShortTensor~3D ShortTensor~3D [(V|F)] | [*ShortTensor*] ShortTensor~3D ShortTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.ShortTensor"); -THShortTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.ShortTensor"); -THShortTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_ShortTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -short arg3 = 1; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -short arg6 = 1; -short arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -short arg11 = 0; -short arg12 = 1; -THShortTensor *arg13 = NULL; -THShortTensor *arg14 = NULL; -short arg15 = 1; -short arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THShortTensor *arg19 = NULL; -int arg19_idx = 0; -short arg20 = 0; -short arg21 = 1; -THShortTensor *arg22 = NULL; -THShortTensor *arg23 = NULL; -short arg24 = 1; -short arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = 
luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THShortTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THShortTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THShortTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THShortTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: 
%s\nexpected arguments: [*ShortTensor*] ShortTensor~2D ShortTensor~2D [(V|F)] | [*ShortTensor*] ShortTensor~3D ShortTensor~3D [(V|F)] | [*ShortTensor*] ShortTensor~3D ShortTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.ShortTensor"); -THShortTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.ShortTensor"); -THShortTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_ShortTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -short arg3 = 1; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -short arg6 = 1; -short arg7 = 1; -short arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THShortTensor *arg11 = NULL; -int arg11_idx = 0; -short arg12 = 0; -short arg13 = 1; -THShortTensor *arg14 = NULL; -THShortTensor *arg15 = NULL; -short arg16 = 1; -short arg17 = 1; -short arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THShortTensor *arg21 = NULL; -int arg21_idx = 0; -short arg22 = 0; -short arg23 = 1; -THShortTensor *arg24 = NULL; -THShortTensor *arg25 = NULL; -short arg26 = 1; -short arg27 = 1; -short arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THShortTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) 
&& (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THShortTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THShortTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THShortTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor~3D ShortTensor~3D [(V|F)] | [*ShortTensor*] ShortTensor~4D ShortTensor~4D [(V|F)] | [*ShortTensor*] ShortTensor~4D ShortTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.ShortTensor"); -THShortTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.ShortTensor"); -THShortTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_ShortTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -short arg3 = 1; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -short arg6 = 1; -short arg7 = 1; -short arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; 
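/* xcorr3 is dispatched exactly like conv3 above; the only difference is
   the second option flag, which defaults to 'X' (cross-correlation) here
   instead of 'C' (convolution). The first flag stays 'V' (valid) unless
   the caller passes 'F' (full), and both entry points funnel into the
   same THShortTensor_conv3D* kernels. */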
-char arg10_default = 'X'; -THShortTensor *arg11 = NULL; -int arg11_idx = 0; -short arg12 = 0; -short arg13 = 1; -THShortTensor *arg14 = NULL; -THShortTensor *arg15 = NULL; -short arg16 = 1; -short arg17 = 1; -short arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THShortTensor *arg21 = NULL; -int arg21_idx = 0; -short arg22 = 0; -short arg23 = 1; -THShortTensor *arg24 = NULL; -THShortTensor *arg25 = NULL; -short arg26 = 1; -short arg27 = 1; -short arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THShortTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THShortTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THShortTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 
1, "torch.ShortTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THShortTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor~3D ShortTensor~3D [(V|F)] | [*ShortTensor*] ShortTensor~4D ShortTensor~4D [(V|F)] | [*ShortTensor*] ShortTensor~4D ShortTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.ShortTensor"); -THShortTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.ShortTensor"); -THShortTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_ShortTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 
2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_ShortTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_gtTensorT(arg10,arg11,arg12); -return 
1; -} -return 0; -} - -static int torch_ShortTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_ShortTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, 
"torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_ShortTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor 
short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_ShortTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_ShortTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THShortTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_ShortTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_zero(arg1); -return 1; -} - -static int m_torch_ShortTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (short)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_fill(arg1,arg2); -return 1; -} - -static int m_torch_ShortTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_ShortTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_ShortTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); 
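/* Result convention shared by the [*ShortTensor*] methods in this file:
   when the caller supplied a destination tensor it sits at stack slot
   arg1_idx and is re-pushed, otherwise the tensor freshly allocated with
   THShortTensor_new() is pushed as new userdata; the TH kernel below then
   writes into whichever tensor is on top of the stack, and the wrapper
   returns that single value. */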
-THShortTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int m_torch_ShortTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THShortTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_ShortTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -short arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* index LongTensor ShortTensor | *ShortTensor* index LongTensor short", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THShortTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THShortTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -THShortTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor ShortTensor", type_buf); -} -arg3 = THShortTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int m_torch_ShortTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -THShortTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor ShortTensor", 
type_buf); -} -arg3 = THShortTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int m_torch_ShortTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 1; -THShortTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short | *ShortTensor* [ShortTensor] [short] ShortTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THShortTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 1; -THShortTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, 
"torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short | *ShortTensor* [ShortTensor] [short] ShortTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THShortTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_div(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& 
lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_bitor(arg1,arg2,arg3); -return 1; -} - 
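All of the two- and three-operand wrappers in this generated file share one cwrap dispatch shape: probe the Lua stack once per candidate signature, record the match (in argset for the multi-set methods, or by falling straight through here), push the destination tensor, then call the TH kernel. A minimal sketch of that shape follows, using only the luaT/TH calls already visible above; the function name sketch_ShortTensor_op and the choice of THShortTensor_mul as the kernel are illustrative, not part of the original file.

static int sketch_ShortTensor_op(lua_State *L)
{
int narg = lua_gettop(L);
THShortTensor *dst = NULL; /* result; allocated when the caller gives none */
int dst_idx = 0; /* stack slot of a caller-supplied result */
THShortTensor *src = NULL;
short value = 0;
if(narg == 2
&& (src = luaT_toudata(L, 1, "torch.ShortTensor"))
&& lua_isnumber(L, 2)
)
{
value = (short)lua_tonumber(L, 2);
dst = THShortTensor_new(); /* optional [*ShortTensor*] omitted */
}
else if(narg == 3
&& (dst = luaT_toudata(L, 1, "torch.ShortTensor"))
&& (src = luaT_toudata(L, 2, "torch.ShortTensor"))
&& lua_isnumber(L, 3)
)
{
dst_idx = 1; /* write into the caller's tensor */
value = (short)lua_tonumber(L, 3);
}
else
{
char type_buf[512];
str_arg_types(L, type_buf, 512);
luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor short", type_buf);
}
if(dst_idx)
lua_pushvalue(L, dst_idx); /* return the tensor we were given */
else
luaT_pushudata(L, dst, "torch.ShortTensor");
THShortTensor_mul(dst, src, value); /* any TH kernel slots in here */
return 1;
}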
-static int m_torch_ShortTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -short arg4 = 0; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg4 = (short)lua_tonumber(L, 3); -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -arg4 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] short short", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_ShortTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -short arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* ShortTensor ShortTensor [short]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_ShortTensor_cmul(lua_State *L) -{ -int narg = 
lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = 
luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
*ShortTensor* [ShortTensor] ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 1; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] [short] ShortTensor ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_ShortTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 1; -THShortTensor *arg4 = NULL; 
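
(Editorial aside: the ten element-wise `c*` bindings removed above, cmul, cpow, cdiv, clshift, crshift, cfmod, cremainder, cbitand, cbitor and cbitxor, plus cmod, which simply dispatches to THShortTensor_cfmod, all share one generated shape: with two stack values the receiver doubles as destination and first operand, with three the destination is explicit; addcmul/addcdiv extend this with an optional scalar multiplier defaulting to 1. A minimal Lua sketch of how callers drove this removed API; tensor names are illustrative, not from the patch.)

    local a = torch.ShortTensor(4):fill(6)
    local b = torch.ShortTensor(4):fill(2)
    a:cmul(b)            -- narg == 2 branch: a = a .* b, in place
    local dst = torch.ShortTensor(4)
    dst:cdiv(a, b)       -- narg == 3 branch: dst = a ./ b
    dst:addcmul(2, a, b) -- narg == 4 branch: dst = dst + 2 .* a .* b
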
-THShortTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor] [short] ShortTensor ShortTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_ShortTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* ShortTensor~2D ShortTensor~1D", type_buf); -} -THShortTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THShortTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_ShortTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* ShortTensor~2D ShortTensor~2D", type_buf); -} -THShortTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THShortTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_ShortTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char 
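
(Editorial aside: mv and mm, deleted just above, and the bmm/ger functions that follow the same template, are not standalone kernels in this generated file: each zeroes its destination and then calls the corresponding accumulate primitive with beta 0 and alpha 1. A hedged Lua sketch of the single accepted call shape, destination then matrix then vector; names are illustrative.)

    local M = torch.ShortTensor(2, 3):fill(1)
    local v = torch.ShortTensor(3):fill(2)
    local y = torch.ShortTensor(2)
    y:mv(M, v)  -- binding runs THShortTensor_zero(y), then addmv(y, 0, y, 1, M, v)
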
type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* ShortTensor~3D ShortTensor~3D", type_buf); -} -THShortTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THShortTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_ShortTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* ShortTensor~1D ShortTensor~1D", type_buf); -} -THShortTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THShortTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_ShortTensor_addmv(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -THShortTensor *arg7 = NULL; -int arg7_idx = 0; -short arg8 = 0; -THShortTensor *arg9 = NULL; -short arg10 = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (short)lua_tonumber(L, 2); -arg10 = (short)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) && 
(arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg9->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.ShortTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (short)lua_tonumber(L, 2); -arg10 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor~1D* [ShortTensor~1D] [short] ShortTensor~2D ShortTensor~1D | *ShortTensor~1D* short [ShortTensor~1D] short ShortTensor~2D ShortTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THShortTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THShortTensor_addmv(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_addmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -THShortTensor *arg7 = NULL; -int arg7_idx = 0; -short arg8 = 0; -THShortTensor *arg9 = NULL; -short arg10 = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (short)lua_tonumber(L, 2); -arg10 = (short)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& 
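
(Editorial aside: addmv, whose removal completes above, and the addmm/addr/addbmm/baddbmm functions that repeat its layout, compile two overload families: argset 1, where beta is implicitly 1 and only alpha may be supplied, and argset 2, where both scalars are explicit and positional. A sketch of the three common spellings, assuming 1-D y, 2-D M and 1-D v as the dimension checks require; names are illustrative.)

    y:addmv(M, v)        -- argset 1: y = y + M * v
    y:addmv(2, M, v)     -- argset 1 with alpha: y = y + 2 * (M * v)
    y:addmv(3, 2, M, v)  -- argset 2: y = 3 * y + 2 * (M * v)
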
(arg11 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.ShortTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (short)lua_tonumber(L, 2); -arg10 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor~2D* [ShortTensor~2D] [short] ShortTensor~2D ShortTensor~2D | *ShortTensor~2D* short [ShortTensor~2D] short ShortTensor~2D ShortTensor~2D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THShortTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THShortTensor_addmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -THShortTensor *arg7 = NULL; -int arg7_idx = 0; -short arg8 = 0; -THShortTensor *arg9 = NULL; -short arg10 = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (short)lua_tonumber(L, 2); -arg10 = (short)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 6, "torch.ShortTensor")) && (arg12->nDimension == 1) 
-) -{ -argset = 2; -arg7_idx = 1; -arg8 = (short)lua_tonumber(L, 2); -arg10 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor~2D* [ShortTensor~2D] [short] ShortTensor~1D ShortTensor~1D | *ShortTensor~2D* short [ShortTensor~2D] short ShortTensor~1D ShortTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THShortTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THShortTensor_addr(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_addbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -THShortTensor *arg7 = NULL; -int arg7_idx = 0; -short arg8 = 0; -THShortTensor *arg9 = NULL; -short arg10 = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (short)lua_tonumber(L, 2); -arg10 = (short)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.ShortTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (short)lua_tonumber(L, 2); -arg10 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, 
type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor~2D* [ShortTensor~2D] [short] ShortTensor~3D ShortTensor~3D | *ShortTensor~2D* short [ShortTensor~2D] short ShortTensor~3D ShortTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THShortTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THShortTensor_addbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_baddbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 1; -THShortTensor *arg3 = NULL; -short arg4 = 1; -THShortTensor *arg5 = NULL; -THShortTensor *arg6 = NULL; -THShortTensor *arg7 = NULL; -int arg7_idx = 0; -short arg8 = 0; -THShortTensor *arg9 = NULL; -short arg10 = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (short)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.ShortTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (short)lua_tonumber(L, 2); -arg10 = (short)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg9->nDimension == 3) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.ShortTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.ShortTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (short)lua_tonumber(L, 2); -arg10 = (short)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor~3D* [ShortTensor~3D] [short] ShortTensor~3D ShortTensor~3D | 
*ShortTensor~3D* short [ShortTensor~3D] short ShortTensor~3D ShortTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THShortTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THShortTensor_baddbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_numel(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -ptrdiff_t arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor", type_buf); -} -arg2 = THShortTensor_numel(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_ShortTensor_cumsum(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cumsum(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_cumprod(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cumprod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_sum(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -long arg2 = 0; -THShortTensor *arg3 = NULL; -int arg3_idx = 0; -THShortTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 
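
(Editorial aside: cumsum and cumprod, removed above, show the patch's index convention: Lua callers pass 1-based dimensions and the binding subtracts 1 before calling into TH, as in `arg3 = (long)lua_tonumber(L, 2)-1`. The destination is optional; when omitted the binding allocates one with THShortTensor_new and pushes it as the result. Illustrative Lua:)

    local t = torch.ShortTensor(2, 3):fill(2)
    local c = t:cumsum(2)  -- dimension 2 in Lua, 1 inside TH; returns a new tensor
    local p = torch.ShortTensor()
    p:cumprod(t, 1)        -- explicit-destination form: [*ShortTensor*] ShortTensor [index]
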
2)-1; -arg3 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor | [*ShortTensor*] ShortTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THShortTensor_sumall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ShortTensor"); -THShortTensor_sum(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_prod(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -long arg2 = 0; -THShortTensor *arg3 = NULL; -int arg3_idx = 0; -THShortTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor | [*ShortTensor*] ShortTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THShortTensor_prodall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ShortTensor"); -THShortTensor_prod(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_min(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -short arg2 = 0; -THShortTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THShortTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor | [*ShortTensor*] [*LongTensor*] ShortTensor index", 
type_buf); -} -if(argset == 1) -{ -arg2 = THShortTensor_minall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ShortTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THShortTensor_min(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_ShortTensor_max(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -short arg2 = 0; -THShortTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THShortTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor | [*ShortTensor*] [*LongTensor*] ShortTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THShortTensor_maxall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ShortTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THShortTensor_max(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_ShortTensor_cmin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = 
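
(Editorial aside: min, deleted above, and max, which follows it verbatim, return either a scalar via minall/maxall, or, when a dimension is supplied, a pair of tensors: the values and a LongTensor of positions. Because TH produces 0-based positions, the binding finishes with THLongTensor_add(idx, idx, 1), so Lua sees 1-based indices. Sketch with illustrative names:)

    local t = torch.ShortTensor(2, 3)
    local lo = t:min()          -- argset 1: scalar over all elements
    local vals, idx = t:min(2)  -- argset 2: row minima plus 1-based column indices
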
(short)lua_tonumber(L, 1); -arg4 = THShortTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (short)lua_tonumber(L, 2); -arg4 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [ShortTensor] ShortTensor | [*ShortTensor*] [ShortTensor] short", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cmin(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ShortTensor"); -THShortTensor_cminValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_cmax(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (short)lua_tonumber(L, 1); -arg4 = THShortTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (short)lua_tonumber(L, 2); -arg4 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [ShortTensor] ShortTensor | [*ShortTensor*] [ShortTensor] short", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cmax(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.ShortTensor"); -THShortTensor_cmaxValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int 
m_torch_ShortTensor_trace(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -long arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: ShortTensor", type_buf); -} -arg2 = THShortTensor_trace(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_ShortTensor_cross(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -long arg4 = -1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cross(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_ShortTensor_diag(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_diag(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_eye(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid 
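
(Editorial aside: cross and diag, removed above, show the optional leading destination spelled `[*ShortTensor*]` in the generated usage strings. Note the asymmetry in their trailing number: cross takes a dimension index and is shifted by -1 like the reductions, while diag's number is a raw diagonal offset passed through unchanged. Sketch, where x stands for an illustrative square 2-D ShortTensor:)

    local d = x:diag()  -- result allocated with THShortTensor_new()
    local dst = torch.ShortTensor()
    dst:diag(x, 1)      -- reuse dst; 1 is the superdiagonal offset, not an index
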
arguments: %s\nexpected arguments: *ShortTensor* long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_eye(arg1,arg2,arg3); -return 1; -} - -static int m_torch_ShortTensor_range(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -long arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* long long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_range(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_ShortTensor_randperm(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [Generator] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_randperm(arg1,arg2,arg3); - -THShortTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_ShortTensor_sort(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THShortTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg2_idx = 1; -arg1 = THShortTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 
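
(Editorial aside: randperm, deleted above, pulls the default THGenerator out of the global `torch._gen` field when none is passed, then shifts the whole result up by one via THShortTensor_add(arg1, arg1, 1); assuming TH's randperm fills 0..n-1 as usual, Lua receives a permutation of 1..n. Sketch:)

    local p = torch.ShortTensor()
    p:randperm(10)  -- permutation of 1..10 using the shared torch._gen state
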
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isboolean(L, 2) -) -{ -arg5 = lua_toboolean(L, 2); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg5 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg5 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = lua_toboolean(L, 4); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg5 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [*LongTensor*] ShortTensor [index] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THShortTensor_sort(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_ShortTensor_topk(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THShortTensor *arg3 = NULL; -long arg4 = 1; -long arg5 = 0; -int arg6 = 0; -int arg7 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, 
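
(Editorial aside: sort's long dispatch ladder, removed above, reduces to three optional pieces around the input: an optional destination pair, values tensor and/or LongTensor of indices, an optional 1-based dimension defaulting to the last one via THShortTensor_nDimension(input)-1, and an optional boolean selecting descending order. As with min/max, the index tensor is bumped to 1-based before returning; topk, which follows, enumerates the same combinations plus a leading k. Sketch with an illustrative tensor t:)

    local s, i = t:sort()           -- last dimension, ascending
    local s2, i2 = t:sort(1, true)  -- dimension 1, descending (boolean maps to arg5)
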
"torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg2_idx = 1; -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& 
lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, 
"torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg1 = THShortTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isboolean(L, 2) -) -{ -arg7 = lua_toboolean(L, 2); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg7 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg7 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg7 = lua_toboolean(L, 4); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = 
THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg7 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THShortTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = 
THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = 
THShortTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -} -else if(narg == 5 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg1 = THShortTensor_new(); -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -&& lua_isboolean(L, 7) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -arg7 = lua_toboolean(L, 7); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [*LongTensor*] ShortTensor [long] [index] [boolean] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THShortTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_ShortTensor_kthvalue(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THShortTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THShortTensor_new(); -arg5 = THShortTensor_nDimension(arg3)-1; 
-} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [*LongTensor*] ShortTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THShortTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_ShortTensor_mode(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THShortTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg2_idx = 1; -arg1 = THShortTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = 
luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [*LongTensor*] ShortTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THShortTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_ShortTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THShortTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg2_idx = 1; -arg1 = THShortTensor_new(); -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THShortTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THShortTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THShortTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] [*LongTensor*] ShortTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THShortTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_ShortTensor_tril(lua_State *L) -{ -int narg 
= lua_gettop(L);
-THShortTensor *arg1 = NULL;
-int arg1_idx = 0;
-THShortTensor *arg2 = NULL;
-int arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor"))
-)
-{
-arg1 = THShortTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (int)lua_tonumber(L, 2);
-arg1 = THShortTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (int)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [int]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.ShortTensor");
-THShortTensor_tril(arg1,arg2,arg3);
-return 1;
-}
-
-static int m_torch_ShortTensor_triu(lua_State *L)
-{
-int narg = lua_gettop(L);
-THShortTensor *arg1 = NULL;
-int arg1_idx = 0;
-THShortTensor *arg2 = NULL;
-int arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor"))
-)
-{
-arg1 = THShortTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (int)lua_tonumber(L, 2);
-arg1 = THShortTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (int)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor [int]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.ShortTensor");
-THShortTensor_triu(arg1,arg2,arg3);
-return 1;
-}
-
-static int m_torch_ShortTensor_cat(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THShortTensor *arg1 = NULL;
-int arg1_idx = 0;
-THShortTensor *arg2 = NULL;
-THShortTensor *arg3 = NULL;
-long arg4 = -2;
-THShortTensor *arg5 = NULL;
-int arg5_idx = 0;
-THShortTensor **arg6_data = NULL;
-long arg6_size = 0;
-int arg6_i = 0;
-long arg7 = -2;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor"))
-)
-{
-argset = 1;
-arg1 = THShortTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.ShortTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THShortTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.ShortTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else if(narg == 1
-&& torch_isnonemptytable(L, 1)
-)
-{
-argset = 2;
-do
-{
- arg6_size++;
- lua_checkstack(L, 1);
- 
lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THShortTensor**)THAlloc(arg6_size * sizeof(THShortTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ShortTensor"))) - luaL_error(L, "expected ShortTensor in tensor array"); - lua_pop(L, 1); -} - -arg5 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THShortTensor**)THAlloc(arg6_size * sizeof(THShortTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ShortTensor"))) - luaL_error(L, "expected ShortTensor in tensor array"); - lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THShortTensor**)THAlloc(arg6_size * sizeof(THShortTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ShortTensor"))) - luaL_error(L, "expected ShortTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THShortTensor**)THAlloc(arg6_size * sizeof(THShortTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.ShortTensor"))) - luaL_error(L, "expected ShortTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor ShortTensor [index] | [*ShortTensor*] {ShortTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.ShortTensor"); -THShortTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THShortTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THShortTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = 
luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *ShortTensor* [Generator] long long | *ShortTensor* [Generator] long | *ShortTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THShortTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THShortTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THShortTensor *arg4 = NULL; -int 
arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *ShortTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THShortTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 2; -arg4_idx = 1; -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg 
== 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *ShortTensor* [Generator] [double] | *ShortTensor* [Generator] FloatTensor | *ShortTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THShortTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -THShortTensor *arg3 = NULL; -int arg3_idx = 0; -THShortTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THShortTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor | [*ShortTensor*] ShortTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THShortTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.ShortTensor"); -{int hasdims = arg4->nDimension > 1; -THShortTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THShortTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, 
"torch.ShortTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *ShortTensor* [ShortTensor]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THShortTensor_sign(arg1,arg2); -return 1; -} - -static int m_torch_ShortTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -short arg3 = 1; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -short arg6 = 1; -short arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -short arg11 = 0; -short arg12 = 1; -THShortTensor *arg13 = NULL; -THShortTensor *arg14 = NULL; -short arg15 = 1; -short arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THShortTensor *arg19 = NULL; -int arg19_idx = 0; -short arg20 = 0; -short arg21 = 1; -THShortTensor *arg22 = NULL; -THShortTensor *arg23 = NULL; -short arg24 = 1; -short arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THShortTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THShortTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, 
"torch.ShortTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THShortTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THShortTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor~2D ShortTensor~2D [(V|F)] | [*ShortTensor*] ShortTensor~3D ShortTensor~3D [(V|F)] | [*ShortTensor*] ShortTensor~3D ShortTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.ShortTensor"); -THShortTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.ShortTensor"); -THShortTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -short arg3 = 1; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -short arg6 = 1; -short arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -short arg11 = 0; -short arg12 = 1; -THShortTensor *arg13 = NULL; -THShortTensor *arg14 = NULL; -short arg15 = 1; -short arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THShortTensor *arg19 = NULL; -int arg19_idx = 0; -short arg20 = 0; -short arg21 = 1; -THShortTensor *arg22 = NULL; -THShortTensor *arg23 = NULL; -short arg24 = 1; -short arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg 
== 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THShortTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THShortTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THShortTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THShortTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, 
"torch.ShortTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor~2D ShortTensor~2D [(V|F)] | [*ShortTensor*] ShortTensor~3D ShortTensor~3D [(V|F)] | [*ShortTensor*] ShortTensor~3D ShortTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.ShortTensor"); -THShortTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.ShortTensor"); -THShortTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -short arg3 = 1; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -short arg6 = 1; -short arg7 = 1; -short arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THShortTensor *arg11 = NULL; -int arg11_idx = 0; -short arg12 = 0; -short arg13 = 1; -THShortTensor *arg14 = NULL; -THShortTensor *arg15 = NULL; -short arg16 = 1; -short arg17 = 1; -short arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THShortTensor *arg21 = NULL; -int arg21_idx = 0; -short arg22 = 0; -short arg23 = 1; -THShortTensor *arg24 = NULL; -THShortTensor *arg25 = NULL; -short arg26 = 1; -short arg27 = 1; -short arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = 
&arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THShortTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THShortTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THShortTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THShortTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor~3D ShortTensor~3D [(V|F)] | [*ShortTensor*] ShortTensor~4D ShortTensor~4D [(V|F)] | [*ShortTensor*] ShortTensor~4D ShortTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.ShortTensor"); -THShortTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.ShortTensor"); 
-THShortTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THShortTensor *arg1 = NULL; -int arg1_idx = 0; -short arg2 = 0; -short arg3 = 1; -THShortTensor *arg4 = NULL; -THShortTensor *arg5 = NULL; -short arg6 = 1; -short arg7 = 1; -short arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THShortTensor *arg11 = NULL; -int arg11_idx = 0; -short arg12 = 0; -short arg13 = 1; -THShortTensor *arg14 = NULL; -THShortTensor *arg15 = NULL; -short arg16 = 1; -short arg17 = 1; -short arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THShortTensor *arg21 = NULL; -int arg21_idx = 0; -short arg22 = 0; -short arg23 = 1; -THShortTensor *arg24 = NULL; -THShortTensor *arg25 = NULL; -short arg26 = 1; -short arg27 = 1; -short arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THShortTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THShortTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THShortTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.ShortTensor")) && 
(arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THShortTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THShortTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.ShortTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.ShortTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ShortTensor*] ShortTensor~3D ShortTensor~3D [(V|F)] | [*ShortTensor*] ShortTensor~4D ShortTensor~4D [(V|F)] | [*ShortTensor*] ShortTensor~4D ShortTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ShortTensor"); -THShortTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.ShortTensor"); -THShortTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.ShortTensor"); -THShortTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); 
-} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); 
-else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; 
-THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, 
"torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -short arg3 = 0; -THShortTensor *arg4 = NULL; -int arg4_idx = 0; -THShortTensor *arg5 = NULL; -short arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THShortTensor *arg8 = NULL; -THShortTensor *arg9 = NULL; -THShortTensor *arg10 = NULL; -int arg10_idx = 0; -THShortTensor *arg11 = NULL; -THShortTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (short)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (short)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (short)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.ShortTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.ShortTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.ShortTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] ShortTensor short | *ShortTensor* ShortTensor short | [*ByteTensor*] ShortTensor ShortTensor | *ShortTensor* ShortTensor ShortTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THShortTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THShortTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); 
-else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THShortTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THShortTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_ShortTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THShortTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ShortTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.ShortTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] ShortTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THShortTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static const struct luaL_Reg m_torch_ShortTensorMath__ [] = { -{"zero", m_torch_ShortTensor_zero}, -{"fill", m_torch_ShortTensor_fill}, -{"zeros", m_torch_ShortTensor_zeros}, -{"ones", m_torch_ShortTensor_ones}, -{"reshape", m_torch_ShortTensor_reshape}, -{"gather", m_torch_ShortTensor_gather}, -{"scatter", m_torch_ShortTensor_scatter}, -{"dot", m_torch_ShortTensor_dot}, -{"equal", m_torch_ShortTensor_equal}, -{"add", m_torch_ShortTensor_add}, -{"csub", m_torch_ShortTensor_csub}, -{"mul", m_torch_ShortTensor_mul}, -{"div", m_torch_ShortTensor_div}, -{"lshift", m_torch_ShortTensor_lshift}, -{"rshift", m_torch_ShortTensor_rshift}, -{"fmod", m_torch_ShortTensor_fmod}, -{"remainder", m_torch_ShortTensor_remainder}, -{"bitand", m_torch_ShortTensor_bitand}, -{"bitor", m_torch_ShortTensor_bitor}, -{"bitxor", m_torch_ShortTensor_bitxor}, -{"mod", m_torch_ShortTensor_mod}, -{"clamp", m_torch_ShortTensor_clamp}, -{"match", m_torch_ShortTensor_match}, -{"cmul", m_torch_ShortTensor_cmul}, -{"cpow", m_torch_ShortTensor_cpow}, -{"cdiv", m_torch_ShortTensor_cdiv}, -{"clshift", m_torch_ShortTensor_clshift}, -{"crshift", m_torch_ShortTensor_crshift}, -{"cfmod", m_torch_ShortTensor_cfmod}, -{"cremainder", m_torch_ShortTensor_cremainder}, -{"cbitand", m_torch_ShortTensor_cbitand}, -{"cbitor", m_torch_ShortTensor_cbitor}, -{"cbitxor", m_torch_ShortTensor_cbitxor}, -{"cmod", m_torch_ShortTensor_cmod}, -{"addcmul", m_torch_ShortTensor_addcmul}, -{"addcdiv", m_torch_ShortTensor_addcdiv}, -{"mv", m_torch_ShortTensor_mv}, -{"mm", m_torch_ShortTensor_mm}, -{"bmm", m_torch_ShortTensor_bmm}, -{"ger", m_torch_ShortTensor_ger}, -{"addmv", m_torch_ShortTensor_addmv}, -{"addmm", m_torch_ShortTensor_addmm}, -{"addr", m_torch_ShortTensor_addr}, -{"addbmm", m_torch_ShortTensor_addbmm}, -{"baddbmm", m_torch_ShortTensor_baddbmm}, -{"numel", m_torch_ShortTensor_numel}, -{"cumsum", m_torch_ShortTensor_cumsum}, -{"cumprod", m_torch_ShortTensor_cumprod}, -{"sum", m_torch_ShortTensor_sum}, -{"prod", m_torch_ShortTensor_prod}, -{"min", m_torch_ShortTensor_min}, -{"max", m_torch_ShortTensor_max}, -{"cmin", m_torch_ShortTensor_cmin}, -{"cmax", m_torch_ShortTensor_cmax}, -{"trace", m_torch_ShortTensor_trace}, -{"cross", m_torch_ShortTensor_cross}, -{"diag", m_torch_ShortTensor_diag}, -{"eye", m_torch_ShortTensor_eye}, -{"range", m_torch_ShortTensor_range}, -{"randperm", m_torch_ShortTensor_randperm}, -{"sort", m_torch_ShortTensor_sort}, -{"topk", m_torch_ShortTensor_topk}, -{"kthvalue", m_torch_ShortTensor_kthvalue}, 
-{"mode", m_torch_ShortTensor_mode}, -{"median", m_torch_ShortTensor_median}, -{"tril", m_torch_ShortTensor_tril}, -{"triu", m_torch_ShortTensor_triu}, -{"cat", m_torch_ShortTensor_cat}, -{"random", m_torch_ShortTensor_random}, -{"geometric", m_torch_ShortTensor_geometric}, -{"bernoulli", m_torch_ShortTensor_bernoulli}, -{"squeeze", m_torch_ShortTensor_squeeze}, -{"sign", m_torch_ShortTensor_sign}, -{"conv2", m_torch_ShortTensor_conv2}, -{"xcorr2", m_torch_ShortTensor_xcorr2}, -{"conv3", m_torch_ShortTensor_conv3}, -{"xcorr3", m_torch_ShortTensor_xcorr3}, -{"lt", m_torch_ShortTensor_lt}, -{"gt", m_torch_ShortTensor_gt}, -{"le", m_torch_ShortTensor_le}, -{"ge", m_torch_ShortTensor_ge}, -{"eq", m_torch_ShortTensor_eq}, -{"ne", m_torch_ShortTensor_ne}, -{"nonzero", m_torch_ShortTensor_nonzero}, -{NULL, NULL} -}; - -static const struct luaL_Reg torch_ShortTensorMath__ [] = { -{"zero", torch_ShortTensor_zero}, -{"fill", torch_ShortTensor_fill}, -{"zeros", torch_ShortTensor_zeros}, -{"ones", torch_ShortTensor_ones}, -{"reshape", torch_ShortTensor_reshape}, -{"gather", torch_ShortTensor_gather}, -{"scatter", torch_ShortTensor_scatter}, -{"dot", torch_ShortTensor_dot}, -{"equal", torch_ShortTensor_equal}, -{"add", torch_ShortTensor_add}, -{"csub", torch_ShortTensor_csub}, -{"mul", torch_ShortTensor_mul}, -{"div", torch_ShortTensor_div}, -{"lshift", torch_ShortTensor_lshift}, -{"rshift", torch_ShortTensor_rshift}, -{"fmod", torch_ShortTensor_fmod}, -{"remainder", torch_ShortTensor_remainder}, -{"bitand", torch_ShortTensor_bitand}, -{"bitor", torch_ShortTensor_bitor}, -{"bitxor", torch_ShortTensor_bitxor}, -{"mod", torch_ShortTensor_mod}, -{"clamp", torch_ShortTensor_clamp}, -{"match", torch_ShortTensor_match}, -{"cmul", torch_ShortTensor_cmul}, -{"cpow", torch_ShortTensor_cpow}, -{"cdiv", torch_ShortTensor_cdiv}, -{"clshift", torch_ShortTensor_clshift}, -{"crshift", torch_ShortTensor_crshift}, -{"cfmod", torch_ShortTensor_cfmod}, -{"cremainder", torch_ShortTensor_cremainder}, -{"cbitand", torch_ShortTensor_cbitand}, -{"cbitor", torch_ShortTensor_cbitor}, -{"cbitxor", torch_ShortTensor_cbitxor}, -{"cmod", torch_ShortTensor_cmod}, -{"addcmul", torch_ShortTensor_addcmul}, -{"addcdiv", torch_ShortTensor_addcdiv}, -{"mv", torch_ShortTensor_mv}, -{"mm", torch_ShortTensor_mm}, -{"bmm", torch_ShortTensor_bmm}, -{"ger", torch_ShortTensor_ger}, -{"addmv", torch_ShortTensor_addmv}, -{"addmm", torch_ShortTensor_addmm}, -{"addr", torch_ShortTensor_addr}, -{"addbmm", torch_ShortTensor_addbmm}, -{"baddbmm", torch_ShortTensor_baddbmm}, -{"numel", torch_ShortTensor_numel}, -{"cumsum", torch_ShortTensor_cumsum}, -{"cumprod", torch_ShortTensor_cumprod}, -{"sum", torch_ShortTensor_sum}, -{"prod", torch_ShortTensor_prod}, -{"min", torch_ShortTensor_min}, -{"max", torch_ShortTensor_max}, -{"cmin", torch_ShortTensor_cmin}, -{"cmax", torch_ShortTensor_cmax}, -{"trace", torch_ShortTensor_trace}, -{"cross", torch_ShortTensor_cross}, -{"diag", torch_ShortTensor_diag}, -{"eye", torch_ShortTensor_eye}, -{"range", torch_ShortTensor_range}, -{"randperm", torch_ShortTensor_randperm}, -{"sort", torch_ShortTensor_sort}, -{"topk", torch_ShortTensor_topk}, -{"kthvalue", torch_ShortTensor_kthvalue}, -{"mode", torch_ShortTensor_mode}, -{"median", torch_ShortTensor_median}, -{"tril", torch_ShortTensor_tril}, -{"triu", torch_ShortTensor_triu}, -{"cat", torch_ShortTensor_cat}, -{"random", torch_ShortTensor_random}, -{"geometric", torch_ShortTensor_geometric}, -{"bernoulli", torch_ShortTensor_bernoulli}, -{"squeeze", 
torch_ShortTensor_squeeze}, -{"sign", torch_ShortTensor_sign}, -{"conv2", torch_ShortTensor_conv2}, -{"xcorr2", torch_ShortTensor_xcorr2}, -{"conv3", torch_ShortTensor_conv3}, -{"xcorr3", torch_ShortTensor_xcorr3}, -{"lt", torch_ShortTensor_lt}, -{"gt", torch_ShortTensor_gt}, -{"le", torch_ShortTensor_le}, -{"ge", torch_ShortTensor_ge}, -{"eq", torch_ShortTensor_eq}, -{"ne", torch_ShortTensor_ne}, -{"nonzero", torch_ShortTensor_nonzero}, -{NULL, NULL} -}; - -static void torch_ShortTensorMath_init(lua_State *L) -{ - luaT_pushmetatable(L, "torch.ShortTensor"); - - /* register methods */ - luaT_setfuncs(L, m_torch_ShortTensorMath__, 0); - - /* register functions into the "torch" field of the tensor metaclass */ - lua_pushstring(L, "torch"); - lua_newtable(L); - luaT_setfuncs(L, torch_ShortTensorMath__, 0); - lua_rawset(L, -3); - lua_pop(L, 1); -} - -static int torch_IntTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_zero(arg1); -return 1; -} - -static int torch_IntTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (int)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_fill(arg1,arg2); -return 1; -} - -static int torch_IntTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THIntTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_IntTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THIntTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_IntTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 
= luaT_toudata(L, 1, "torch.IntTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int torch_IntTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THIntTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THIntTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_IntTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -int arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (int)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* index LongTensor IntTensor | *IntTensor* index LongTensor int", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THIntTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THIntTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_IntTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -THIntTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
IntTensor IntTensor", type_buf); -} -arg3 = THIntTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int torch_IntTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor IntTensor", type_buf); -} -arg3 = THIntTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int torch_IntTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 1; -THIntTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 2; -arg4 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 2; -arg6 = (int)lua_tonumber(L, 2); -arg4 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int | [*IntTensor*] IntTensor [int] IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.IntTensor"); -THIntTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_IntTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 1; -THIntTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.IntTensor")) -) 
-{ -argset = 2; -arg4 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 2; -arg6 = (int)lua_tonumber(L, 2); -arg4 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int | [*IntTensor*] IntTensor [int] IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.IntTensor"); -THIntTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_IntTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_div(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 
512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, 
"torch.IntTensor"); -THIntTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -int arg4 = 0; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg4 = (int)lua_tonumber(L, 3); -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -arg4 = (int)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor int int", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} 
- -static int torch_IntTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -int arg4 = 1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (int)lua_tonumber(L, 3); -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_IntTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else 
-{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor", 
type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cfmod(arg1,arg2,arg3); -return 1; -} - 
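/*
 * Note how the deleted mod/cmod wrappers are pure aliases: torch_IntTensor_mod
 * dispatches to THIntTensor_fmod and torch_IntTensor_cmod to THIntTensor_cfmod,
 * while distinct fmod/remainder (and cfmod/cremainder) bindings are kept.
 * Assuming torch7's documented semantics, the two flavours differ only in sign
 * convention: fmod follows C's truncated division (result takes the dividend's
 * sign), remainder follows floored division (result takes the divisor's sign).
 * A small standalone contrast on plain ints instead of tensors (fmod_like and
 * remainder_like are hypothetical helper names, not torch API):
 */
#include <stdio.h>

static int fmod_like(int a, int b)
{
    return a % b;                    /* C99 '%' truncates toward zero */
}

static int remainder_like(int a, int b)
{
    int r = a % b;
    /* shift into the divisor's sign when the truncated result disagrees */
    return (r != 0 && ((r < 0) != (b < 0))) ? r + b : r;
}

int main(void)
{
    printf("fmod(-7, 3)      = %d\n", fmod_like(-7, 3));       /* prints -1 */
    printf("remainder(-7, 3) = %d\n", remainder_like(-7, 3));  /* prints  2 */
    return 0;
}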
-static int torch_IntTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [int] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_IntTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [int] IntTensor IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_IntTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.IntTensor")) && 
(arg6->nDimension == 1) -) -{ -arg1 = THIntTensor_new(); -THIntTensor_resize1d(arg1, arg5->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~2D IntTensor~1D", type_buf); -} -THIntTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_IntTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THIntTensor_new(); -THIntTensor_resize2d(arg1, arg5->size[0], arg6->size[1]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~2D IntTensor~2D", type_buf); -} -THIntTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_IntTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THIntTensor_new(); -THIntTensor_resize3d(arg1, arg5->size[0], arg5->size[1], arg6->size[2]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~3D IntTensor~3D", type_buf); -} -THIntTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_IntTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 1; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THIntTensor_new(); -THIntTensor_resize2d(arg1, arg5->size[0], arg6->size[0]); -arg3 = arg1; 
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-arg3 = arg1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~1D IntTensor~1D", type_buf);
-}
-THIntTensor_zero(arg1);
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_IntTensor_addmv(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-int arg2 = 1;
-THIntTensor *arg3 = NULL;
-int arg4 = 1;
-THIntTensor *arg5 = NULL;
-THIntTensor *arg6 = NULL;
-if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg3->nDimension == 1)
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 1)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 4
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 1)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg2 = (int)lua_tonumber(L, 1);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg3->nDimension == 1)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-arg2 = (int)lua_tonumber(L, 2);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg3->nDimension == 1)
-&& lua_isnumber(L, 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg4 = (int)lua_tonumber(L, 2);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 1)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-arg4 = (int)lua_tonumber(L, 3);
-}
-else if(narg == 5
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 1)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg2 = (int)lua_tonumber(L, 1);
-arg4 = (int)lua_tonumber(L, 3);
-arg1 = THIntTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg3->nDimension == 1)
-&& lua_isnumber(L, 4)
-&& (arg5 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 6, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-arg2 = (int)lua_tonumber(L, 2);
-arg4 = (int)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [int] IntTensor~1D [int] IntTensor~2D IntTensor~1D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_IntTensor_addmm(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-int arg2 = 1;
-THIntTensor *arg3 = NULL;
-int arg4 = 1;
-THIntTensor *arg5 = NULL;
-THIntTensor *arg6 = NULL;
-if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 2)
-)
-{
-arg1 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 2)
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 4
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 2)
-)
-{
-arg2 = (int)lua_tonumber(L, 1);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 2)
-)
-{
-arg1_idx = 1;
-arg2 = (int)lua_tonumber(L, 2);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 2)
-)
-{
-arg4 = (int)lua_tonumber(L, 2);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 2)
-)
-{
-arg1_idx = 1;
-arg4 = (int)lua_tonumber(L, 3);
-}
-else if(narg == 5
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 2)
-)
-{
-arg2 = (int)lua_tonumber(L, 1);
-arg4 = (int)lua_tonumber(L, 3);
-arg1 = THIntTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 4)
-&& (arg5 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 6, "torch.IntTensor")) && (arg6->nDimension == 2)
-)
-{
-arg1_idx = 1;
-arg2 = (int)lua_tonumber(L, 2);
-arg4 = (int)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [int] IntTensor~2D [int] IntTensor~2D IntTensor~2D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_IntTensor_addr(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-int arg2 = 1;
-THIntTensor *arg3 = NULL;
-int arg4 = 1;
-THIntTensor *arg5 = NULL;
-THIntTensor *arg6 = NULL;
-if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 4
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg2 = (int)lua_tonumber(L, 1);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-arg2 = (int)lua_tonumber(L, 2);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg4 = (int)lua_tonumber(L, 2);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-arg4 = (int)lua_tonumber(L, 3);
-}
-else if(narg == 5
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg2 = (int)lua_tonumber(L, 1);
-arg4 = (int)lua_tonumber(L, 3);
-arg1 = THIntTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 4)
-&& (arg5 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 6, "torch.IntTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-arg2 = (int)lua_tonumber(L, 2);
-arg4 = (int)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [int] IntTensor~2D [int] IntTensor~1D IntTensor~1D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_IntTensor_addbmm(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-int arg2 = 1;
-THIntTensor *arg3 = NULL;
-int arg4 = 1;
-THIntTensor *arg5 = NULL;
-THIntTensor *arg6 = NULL;
-if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 4
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (int)lua_tonumber(L, 1);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (int)lua_tonumber(L, 2);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg4 = (int)lua_tonumber(L, 2);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg4 = (int)lua_tonumber(L, 3);
-}
-else if(narg == 5
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (int)lua_tonumber(L, 1);
-arg4 = (int)lua_tonumber(L, 3);
-arg1 = THIntTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 4)
-&& (arg5 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 6, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (int)lua_tonumber(L, 2);
-arg4 = (int)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [int] IntTensor~2D [int] IntTensor~3D IntTensor~3D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_IntTensor_baddbmm(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-int arg2 = 1;
-THIntTensor *arg3 = NULL;
-int arg4 = 1;
-THIntTensor *arg5 = NULL;
-THIntTensor *arg6 = NULL;
-if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 4
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (int)lua_tonumber(L, 1);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (int)lua_tonumber(L, 2);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg4 = (int)lua_tonumber(L, 2);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg4 = (int)lua_tonumber(L, 3);
-}
-else if(narg == 5
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (int)lua_tonumber(L, 1);
-arg4 = (int)lua_tonumber(L, 3);
-arg1 = THIntTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 4)
-&& (arg5 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 6, "torch.IntTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (int)lua_tonumber(L, 2);
-arg4 = (int)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [int] IntTensor~3D [int] IntTensor~3D IntTensor~3D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_IntTensor_numel(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-ptrdiff_t arg2 = 0;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor", type_buf);
-}
-arg2 = THIntTensor_numel(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-
-static int torch_IntTensor_cumsum(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-THIntTensor *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-arg1 = THIntTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2)-1;
-arg1 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_cumsum(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_IntTensor_cumprod(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-THIntTensor *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-arg1 = THIntTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2)-1;
-arg1 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_cumprod(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_IntTensor_sum(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THIntTensor *arg1 = NULL;
-long arg2 = 0;
-THIntTensor *arg3 = NULL;
-int arg3_idx = 0;
-THIntTensor *arg4 = NULL;
-long arg5 = 0;
-int arg6 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg3 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor | [*IntTensor*] IntTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THIntTensor_sumall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.IntTensor");
-THIntTensor_sum(arg3,arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_IntTensor_prod(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THIntTensor *arg1 = NULL;
-long arg2 = 0;
-THIntTensor *arg3 = NULL;
-int arg3_idx = 0;
-THIntTensor *arg4 = NULL;
-long arg5 = 0;
-int arg6 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg3 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor | [*IntTensor*] IntTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THIntTensor_prodall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.IntTensor");
-THIntTensor_prod(arg3,arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_IntTensor_min(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THIntTensor *arg1 = NULL;
-int arg2 = 0;
-THIntTensor *arg3 = NULL;
-int arg3_idx = 0;
-THLongTensor *arg4 = NULL;
-int arg4_idx = 0;
-THIntTensor *arg5 = NULL;
-long arg6 = 0;
-int arg7 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (long)lua_tonumber(L, 2)-1;
-arg3 = THIntTensor_new();
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg3 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg4_idx = 2;
-arg6 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor | [*IntTensor*] [*LongTensor*] IntTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THIntTensor_minall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.IntTensor");
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.LongTensor");
-THIntTensor_min(arg3,arg4,arg5,arg6,arg7);
-THLongTensor_add(arg4, arg4, 1);
-return 2;
-}
-return 0;
-}
-
-static int torch_IntTensor_max(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THIntTensor *arg1 = NULL;
-int arg2 = 0;
-THIntTensor *arg3 = NULL;
-int arg3_idx = 0;
-THLongTensor *arg4 = NULL;
-int arg4_idx = 0;
-THIntTensor *arg5 = NULL;
-long arg6 = 0;
-int arg7 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (long)lua_tonumber(L, 2)-1;
-arg3 = THIntTensor_new();
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg3 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg4_idx = 2;
-arg6 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor | [*IntTensor*] [*LongTensor*] IntTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THIntTensor_maxall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.IntTensor");
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.LongTensor");
-THIntTensor_max(arg3,arg4,arg5,arg6,arg7);
-THLongTensor_add(arg4, arg4, 1);
-return 2;
-}
-return 0;
-}
-
-static int torch_IntTensor_cmin(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-THIntTensor *arg2 = NULL;
-THIntTensor *arg3 = NULL;
-THIntTensor *arg4 = NULL;
-int arg4_idx = 0;
-THIntTensor *arg5 = NULL;
-int arg6 = 0;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-argset = 1;
-arg1 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (int)lua_tonumber(L, 2);
-arg4 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (int)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor | [*IntTensor*] IntTensor int", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_cmin(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.IntTensor");
-THIntTensor_cminValue(arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_IntTensor_cmax(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-THIntTensor *arg2 = NULL;
-THIntTensor *arg3 = NULL;
-THIntTensor *arg4 = NULL;
-int arg4_idx = 0;
-THIntTensor *arg5 = NULL;
-int arg6 = 0;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-argset = 1;
-arg1 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (int)lua_tonumber(L, 2);
-arg4 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (int)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor | [*IntTensor*] IntTensor int", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_cmax(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.IntTensor");
-THIntTensor_cmaxValue(arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_IntTensor_trace(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-long arg2 = 0;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor", type_buf);
-}
-arg2 = THIntTensor_trace(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-
-static int torch_IntTensor_cross(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-THIntTensor *arg2 = NULL;
-THIntTensor *arg3 = NULL;
-long arg4 = -1;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-arg1 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_cross(arg1,arg2,arg3,arg4);
-return 1;
-}
-
-static int torch_IntTensor_diag(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-THIntTensor *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-arg1 = THIntTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [long]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_diag(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_IntTensor_eye(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-long arg2 = 0;
-long arg3 = 0;
-if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-arg2 = (long)lua_tonumber(L, 1);
-arg1 = THIntTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg1_idx = 1;
-arg2 = (long)lua_tonumber(L, 2);
-}
-else if(narg == 2
-&& lua_isnumber(L, 1)
-&& lua_isnumber(L, 2)
-)
-{
-arg2 = (long)lua_tonumber(L, 1);
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg2 = (long)lua_tonumber(L, 2);
-arg3 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] long [long]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_eye(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_IntTensor_range(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-long arg2 = 0;
-long arg3 = 0;
-long arg4 = 1;
-if(narg == 2
-&& lua_isnumber(L, 1)
-&& lua_isnumber(L, 2)
-)
-{
-arg2 = (long)lua_tonumber(L, 1);
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg2 = (long)lua_tonumber(L, 2);
-arg3 = (long)lua_tonumber(L, 3);
-}
-else if(narg == 3
-&& lua_isnumber(L, 1)
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg2 = (long)lua_tonumber(L, 1);
-arg3 = (long)lua_tonumber(L, 2);
-arg4 = (long)lua_tonumber(L, 3);
-arg1 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2 = (long)lua_tonumber(L, 2);
-arg3 = (long)lua_tonumber(L, 3);
-arg4 = (long)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] long long [long]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_range(arg1,arg2,arg3,arg4);
-return 1;
-}
-
-static int torch_IntTensor_randperm(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-THGenerator *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-arg3 = (long)lua_tonumber(L, 1);
-arg1 = THIntTensor_new();
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 2);
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, torch_Generator))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THIntTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, torch_Generator))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [Generator] long", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-THIntTensor_randperm(arg1,arg2,arg3);
-
-THIntTensor_add(arg1, arg1, 1);
-return 1;
-}
-
-static int torch_IntTensor_sort(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-int arg2_idx = 0;
-THIntTensor *arg3 = NULL;
-long arg4 = 0;
-int arg5 = 0;
-if(narg == 1
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-arg1_idx = 1;
-arg2 = THLongTensor_new();
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-arg2_idx = 1;
-arg1 = THIntTensor_new();
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (long)lua_tonumber(L, 2)-1;
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isboolean(L, 2)
-)
-{
-arg5 = lua_toboolean(L, 2);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg1_idx = 1;
-arg5 = lua_toboolean(L, 3);
-arg2 = THLongTensor_new();
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg2_idx = 1;
-arg5 = lua_toboolean(L, 3);
-arg1 = THIntTensor_new();
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = lua_toboolean(L, 4);
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2)-1;
-arg5 = lua_toboolean(L, 3);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg5 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg5 = lua_toboolean(L, 4);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4)-1;
-arg5 = lua_toboolean(L, 5);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [*LongTensor*] IntTensor [index] [boolean]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-if(arg2_idx)
-lua_pushvalue(L, arg2_idx);
-else
-luaT_pushudata(L, arg2, "torch.LongTensor");
-THIntTensor_sort(arg1,arg2,arg3,arg4,arg5);
-THLongTensor_add(arg2, arg2, 1);
-return 2;
-}
-
-static int torch_IntTensor_topk(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-int arg2_idx = 0;
-THIntTensor *arg3 = NULL;
-long arg4 = 1;
-long arg5 = 0;
-int arg6 = 0;
-int arg7 = 0;
-if(narg == 1
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-arg1_idx = 1;
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-arg2_idx = 1;
-arg1 = THIntTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg1 = THIntTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg1 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = (long)lua_tonumber(L, 4)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isboolean(L, 2)
-)
-{
-arg6 = lua_toboolean(L, 2);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg1_idx = 1;
-arg6 = lua_toboolean(L, 3);
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg2_idx = 1;
-arg6 = lua_toboolean(L, 3);
-arg1 = THIntTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg6 = lua_toboolean(L, 4);
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg6 = lua_toboolean(L, 3);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg6 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg6 = lua_toboolean(L, 4);
-arg1 = THIntTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg6 = lua_toboolean(L, 5);
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg6 = lua_toboolean(L, 3);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg1 = THIntTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-arg6 = lua_toboolean(L, 6);
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isboolean(L, 2)
-)
-{
-arg7 = lua_toboolean(L, 2);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg1_idx = 1;
-arg7 = lua_toboolean(L, 3);
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg2_idx = 1;
-arg7 = lua_toboolean(L, 3);
-arg1 = THIntTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg7 = lua_toboolean(L, 4);
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg7 = lua_toboolean(L, 3);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg1 = THIntTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg7 = lua_toboolean(L, 3);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg7 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg7 = lua_toboolean(L, 4);
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg7 = lua_toboolean(L, 5);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg7 = lua_toboolean(L, 4);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg7 = lua_toboolean(L, 5);
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg7 = lua_toboolean(L, 5);
-arg1 = THIntTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-arg7 = lua_toboolean(L, 6);
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isboolean(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg6 = lua_toboolean(L, 2);
-arg7 = lua_toboolean(L, 3);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isboolean(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg6 = lua_toboolean(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isboolean(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg6 = lua_toboolean(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg1 = THIntTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg6 = lua_toboolean(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 5
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg1 = THIntTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg6 = lua_toboolean(L, 5);
-arg7 = lua_toboolean(L, 6);
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg6 = lua_toboolean(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg1 = THIntTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg7 = lua_toboolean(L, 6);
-}
-else if(narg == 5
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg7 = lua_toboolean(L, 6);
-arg2 = THLongTensor_new();
-}
-else if(narg == 6
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg7 = lua_toboolean(L, 6);
-arg1 = THIntTensor_new();
-}
-else if(narg == 7
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-&& lua_isboolean(L, 6)
-&& lua_isboolean(L, 7)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-arg6 = lua_toboolean(L, 6);
-arg7 = lua_toboolean(L, 7);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [*LongTensor*] IntTensor [long] [index] [boolean] [boolean]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-if(arg2_idx)
-lua_pushvalue(L, arg2_idx);
-else
-luaT_pushudata(L, arg2, "torch.LongTensor");
-THIntTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7);
-THLongTensor_add(arg2, arg2, 1);
-return 2;
-}
-
-static int torch_IntTensor_kthvalue(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-int arg2_idx = 0;
-THIntTensor *arg3 = NULL;
-long arg4 = 0;
-long arg5 = 0;
-int arg6 = 1;
-if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg2 = THLongTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg1 = THIntTensor_new();
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg1 = THIntTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [*LongTensor*] IntTensor long [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-if(arg2_idx)
-lua_pushvalue(L, arg2_idx);
-else
-luaT_pushudata(L, arg2, "torch.LongTensor");
-THIntTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6);
-THLongTensor_add(arg2, arg2, 1);
-return 2;
-}
-
-static int torch_IntTensor_mode(lua_State *L)
-{
-int narg = lua_gettop(L);
-THIntTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-int arg2_idx = 0;
-THIntTensor *arg3 = NULL;
-long arg4 = 0;
-int arg5 = 1;
-if(narg == 1
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-)
-{
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-arg1_idx = 1;
-arg2 = THLongTensor_new();
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-)
-{
-arg2_idx = 1;
-arg1 = THIntTensor_new();
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = THIntTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (long)lua_tonumber(L, 2)-1;
-arg1 = THIntTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THIntTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [*LongTensor*] IntTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.IntTensor");
-if(arg2_idx)
-lua_pushvalue(L, arg2_idx);
-else
-luaT_pushudata(L, arg2, "torch.LongTensor");
-THIntTensor_mode(arg1,arg2,arg3,arg4,arg5);
-THLongTensor_add(arg2, arg2, 1);
-return 2; -} - -static int torch_IntTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THIntTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg2_idx = 1; -arg1 = THIntTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [*LongTensor*] IntTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THIntTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_IntTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); 
-THIntTensor_tril(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_triu(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_triu(arg1,arg2,arg3); -return 1; -} - -static int torch_IntTensor_cat(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -long arg4 = -2; -THIntTensor *arg5 = NULL; -int arg5_idx = 0; -THIntTensor **arg6_data = NULL; -long arg6_size = 0; -int arg6_i = 0; -long arg7 = -2; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 1 -&& torch_isnonemptytable(L, 1) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THIntTensor**)THAlloc(arg6_size * sizeof(THIntTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.IntTensor"))) - luaL_error(L, "expected IntTensor in tensor array"); - lua_pop(L, 1); -} - -arg5 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THIntTensor**)THAlloc(arg6_size * sizeof(THIntTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.IntTensor"))) - luaL_error(L, "expected IntTensor in tensor array"); - lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 
1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THIntTensor**)THAlloc(arg6_size * sizeof(THIntTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.IntTensor"))) - luaL_error(L, "expected IntTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THIntTensor**)THAlloc(arg6_size * sizeof(THIntTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.IntTensor"))) - luaL_error(L, "expected IntTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor [index] | [*IntTensor*] {IntTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.IntTensor"); -THIntTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static void THIntTensor_random2__(THIntTensor *self, THGenerator *gen, long a, long b) -{ - THArgCheck(b >= a, 2, "upper bound must be larger than lower bound"); - TH_TENSOR_APPLY(int, self, *self_data = ((THRandom_random(gen) % (b+1-a)) + a);) -} - -static void THIntTensor_random1__(THIntTensor *self, THGenerator *gen, long b) -{ - THArgCheck(b > 0, 1, "upper bound must be strictly positive"); - TH_TENSOR_APPLY(int, self, *self_data = (THRandom_random(gen) % b + 1);) -} - -static int torch_IntTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THIntTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THIntTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; 
-lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *IntTensor* [Generator] long long | *IntTensor* [Generator] long | *IntTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THIntTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THIntTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int torch_IntTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 
-&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *IntTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_IntTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THIntTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, 
type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *IntTensor* [Generator] [double] | *IntTensor* [Generator] FloatTensor | *IntTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THIntTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_IntTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -int arg3_idx = 0; -THIntTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor | [*IntTensor*] IntTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THIntTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.IntTensor"); -{int hasdims = arg4->nDimension > 1; -THIntTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THIntTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int torch_IntTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_sign(arg1,arg2); -return 1; -} - -static int torch_IntTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -int arg6 = 1; -int arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -int 
arg11 = 0; -int arg12 = 1; -THIntTensor *arg13 = NULL; -THIntTensor *arg14 = NULL; -int arg15 = 1; -int arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THIntTensor *arg19 = NULL; -int arg19_idx = 0; -int arg20 = 0; -int arg21 = 1; -THIntTensor *arg22 = NULL; -THIntTensor *arg23 = NULL; -int arg24 = 1; -int arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THIntTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THIntTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THIntTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; 
-arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THIntTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~2D IntTensor~2D [(V|F)] | [*IntTensor*] IntTensor~3D IntTensor~3D [(V|F)] | [*IntTensor*] IntTensor~3D IntTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.IntTensor"); -THIntTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.IntTensor"); -THIntTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_IntTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -int arg6 = 1; -int arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -int arg11 = 0; -int arg12 = 1; -THIntTensor *arg13 = NULL; -THIntTensor *arg14 = NULL; -int arg15 = 1; -int arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THIntTensor *arg19 = NULL; -int arg19_idx = 0; -int arg20 = 0; -int arg21 = 1; -THIntTensor *arg22 = NULL; -THIntTensor *arg23 = NULL; -int arg24 = 1; -int arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) 
-&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THIntTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THIntTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THIntTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THIntTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~2D IntTensor~2D [(V|F)] | [*IntTensor*] IntTensor~3D IntTensor~3D [(V|F)] | [*IntTensor*] IntTensor~3D IntTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.IntTensor"); -THIntTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} 
-else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.IntTensor"); -THIntTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_IntTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -int arg6 = 1; -int arg7 = 1; -int arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THIntTensor *arg11 = NULL; -int arg11_idx = 0; -int arg12 = 0; -int arg13 = 1; -THIntTensor *arg14 = NULL; -THIntTensor *arg15 = NULL; -int arg16 = 1; -int arg17 = 1; -int arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THIntTensor *arg21 = NULL; -int arg21_idx = 0; -int arg22 = 0; -int arg23 = 1; -THIntTensor *arg24 = NULL; -THIntTensor *arg25 = NULL; -int arg26 = 1; -int arg27 = 1; -int arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THIntTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THIntTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, 
"torch.IntTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THIntTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THIntTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~3D IntTensor~3D [(V|F)] | [*IntTensor*] IntTensor~4D IntTensor~4D [(V|F)] | [*IntTensor*] IntTensor~4D IntTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.IntTensor"); -THIntTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.IntTensor"); -THIntTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_IntTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -int arg6 = 1; -int arg7 = 1; -int arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THIntTensor *arg11 = NULL; -int arg11_idx = 0; -int arg12 = 0; -int arg13 = 1; -THIntTensor *arg14 = NULL; -THIntTensor *arg15 = NULL; -int arg16 = 1; -int arg17 = 1; -int arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THIntTensor *arg21 = NULL; -int arg21_idx = 0; -int arg22 = 0; -int arg23 = 1; -THIntTensor *arg24 = NULL; -THIntTensor *arg25 = NULL; -int arg26 = 1; -int arg27 = 1; -int arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -) -{ 
-argset = 1; -arg1 = THIntTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THIntTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THIntTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THIntTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THIntTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 
'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~3D IntTensor~3D [(V|F)] | [*IntTensor*] IntTensor~4D IntTensor~4D [(V|F)] | [*IntTensor*] IntTensor~4D IntTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.IntTensor"); -THIntTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.IntTensor"); -THIntTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_IntTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static 
int torch_IntTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_IntTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = 
(int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_IntTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_geValue(arg1,arg2,arg3); 
-return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_IntTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_IntTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = 
THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_IntTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THIntTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int torch_IntTensor_abs(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -int arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (int)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor | int", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_abs(arg1,arg2); -return 1; 
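-/* The argset 2 branch below is the scalar overload: when abs() is called on a plain Lua number rather than a tensor, the value falls through to C abs() and is pushed back as a number. */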
-} -else if(argset == 2) -{ -arg4 = abs(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_abs(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "abs"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.abs() function", tname); - - return lua_gettop(L); -} - -static int m_torch_IntTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_zero(arg1); -return 1; -} - -static int m_torch_IntTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (int)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_fill(arg1,arg2); -return 1; -} - -static int m_torch_IntTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_IntTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_IntTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = 
luaT_toudata(L, 1, "torch.IntTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int m_torch_IntTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THIntTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THIntTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_IntTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -int arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (int)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* index LongTensor IntTensor | *IntTensor* index LongTensor int", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THIntTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THIntTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -THIntTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
IntTensor IntTensor", type_buf); -} -arg3 = THIntTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int m_torch_IntTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor IntTensor", type_buf); -} -arg3 = THIntTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int m_torch_IntTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 1; -THIntTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int | *IntTensor* [IntTensor] [int] IntTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THIntTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 1; -THIntTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) 
-&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int | *IntTensor* [IntTensor] [int] IntTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THIntTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_div(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; 
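-/* Same in-place convention as mul/div/lshift above: in the two-argument form the tensor is both source and destination (arg2 = arg1), and the destination is pushed back on the stack so calls can be chained. */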
-if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, 
"invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -int arg4 = 0; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg4 = (int)lua_tonumber(L, 3); -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -arg4 = (int)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] int int", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_IntTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -int arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* IntTensor IntTensor [int]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int 
m_torch_IntTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = 
luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int 
m_torch_IntTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] [int] IntTensor IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_IntTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = 
luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] [int] IntTensor IntTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_IntTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* IntTensor~2D IntTensor~1D", type_buf); -} -THIntTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THIntTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_IntTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* IntTensor~2D IntTensor~2D", type_buf); -} -THIntTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THIntTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_IntTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* IntTensor~3D IntTensor~3D", type_buf); -} -THIntTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THIntTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_IntTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int 
arg1_idx = 0; -int arg2 = 1; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* IntTensor~1D IntTensor~1D", type_buf); -} -THIntTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THIntTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_IntTensor_addmv(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 1; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -THIntTensor *arg7 = NULL; -int arg7_idx = 0; -int arg8 = 0; -THIntTensor *arg9 = NULL; -int arg10 = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (int)lua_tonumber(L, 2); -arg10 = (int)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg9->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.IntTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (int)lua_tonumber(L, 2); -arg10 = (int)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: 
%s\nexpected arguments: *IntTensor~1D* [IntTensor~1D] [int] IntTensor~2D IntTensor~1D | *IntTensor~1D* int [IntTensor~1D] int IntTensor~2D IntTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THIntTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THIntTensor_addmv(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_addmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 1; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -THIntTensor *arg7 = NULL; -int arg7_idx = 0; -int arg8 = 0; -THIntTensor *arg9 = NULL; -int arg10 = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (int)lua_tonumber(L, 2); -arg10 = (int)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.IntTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (int)lua_tonumber(L, 2); -arg10 = (int)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor~2D* [IntTensor~2D] [int] IntTensor~2D IntTensor~2D | *IntTensor~2D* int [IntTensor~2D] int IntTensor~2D IntTensor~2D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); 
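-/* BLAS-style signature: arg2 is beta and arg4 is alpha (both default to 1), so this call computes arg1 = beta*arg3 + alpha*(arg5 x arg6). */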
-THIntTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THIntTensor_addmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 1; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -THIntTensor *arg7 = NULL; -int arg7_idx = 0; -int arg8 = 0; -THIntTensor *arg9 = NULL; -int arg10 = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (int)lua_tonumber(L, 2); -arg10 = (int)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 6, "torch.IntTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (int)lua_tonumber(L, 2); -arg10 = (int)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor~2D* [IntTensor~2D] [int] IntTensor~1D IntTensor~1D | *IntTensor~2D* int [IntTensor~2D] int IntTensor~1D IntTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THIntTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THIntTensor_addr(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_addbmm(lua_State *L) -{ -int narg 
= lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 1; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -THIntTensor *arg7 = NULL; -int arg7_idx = 0; -int arg8 = 0; -THIntTensor *arg9 = NULL; -int arg10 = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (int)lua_tonumber(L, 2); -arg10 = (int)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.IntTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (int)lua_tonumber(L, 2); -arg10 = (int)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor~2D* [IntTensor~2D] [int] IntTensor~3D IntTensor~3D | *IntTensor~2D* int [IntTensor~2D] int IntTensor~3D IntTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THIntTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THIntTensor_addbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_baddbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 1; -THIntTensor *arg3 = NULL; -int arg4 = 1; -THIntTensor *arg5 = NULL; -THIntTensor *arg6 = NULL; -THIntTensor *arg7 = NULL; -int arg7_idx = 0; -int arg8 = 0; 
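-/* baddbmm operates on batches: every tensor operand must be 3D (batch x n x m), which the nDimension == 3 guards in the dispatch below enforce before the batched matrix products run. */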
-THIntTensor *arg9 = NULL; -int arg10 = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.IntTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (int)lua_tonumber(L, 2); -arg10 = (int)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg9->nDimension == 3) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.IntTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.IntTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (int)lua_tonumber(L, 2); -arg10 = (int)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor~3D* [IntTensor~3D] [int] IntTensor~3D IntTensor~3D | *IntTensor~3D* int [IntTensor~3D] int IntTensor~3D IntTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THIntTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THIntTensor_baddbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_numel(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -ptrdiff_t arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor", type_buf); -} -arg2 = THIntTensor_numel(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_IntTensor_cumsum(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; 
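-/* Dimension arguments arrive 1-based from Lua and are shifted with lua_tonumber(...)-1 before reaching the 0-based TH API; index results produced by min/max/nonzero are shifted back the other way via THLongTensor_add(..., 1). */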
-THIntTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cumsum(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_cumprod(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cumprod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_sum(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -long arg2 = 0; -THIntTensor *arg3 = NULL; -int arg3_idx = 0; -THIntTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor | [*IntTensor*] IntTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THIntTensor_sumall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.IntTensor"); -THIntTensor_sum(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_prod(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -long arg2 = 0; -THIntTensor *arg3 = NULL; -int arg3_idx = 0; -THIntTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = 
luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor | [*IntTensor*] IntTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THIntTensor_prodall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.IntTensor"); -THIntTensor_prod(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_min(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg2 = 0; -THIntTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THIntTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor | [*IntTensor*] [*LongTensor*] IntTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THIntTensor_minall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.IntTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THIntTensor_min(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_IntTensor_max(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg2 = 0; -THIntTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THIntTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, 
"torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor | [*IntTensor*] [*LongTensor*] IntTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THIntTensor_maxall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.IntTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THIntTensor_max(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_IntTensor_cmin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (int)lua_tonumber(L, 1); -arg4 = THIntTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (int)lua_tonumber(L, 2); -arg4 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [IntTensor] IntTensor | [*IntTensor*] [IntTensor] int", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cmin(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.IntTensor"); -THIntTensor_cminValue(arg4,arg5,arg6); -return 1; 
-} -return 0; -} - -static int m_torch_IntTensor_cmax(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (int)lua_tonumber(L, 1); -arg4 = THIntTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (int)lua_tonumber(L, 2); -arg4 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [IntTensor] IntTensor | [*IntTensor*] [IntTensor] int", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cmax(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.IntTensor"); -THIntTensor_cmaxValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_trace(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -long arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: IntTensor", type_buf); -} -arg2 = THIntTensor_trace(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_IntTensor_cross(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -long arg4 = -1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, 
"torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cross(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_IntTensor_diag(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_diag(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_eye(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_eye(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_range(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -long arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* long long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_range(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_IntTensor_randperm(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); 
-lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [Generator] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_randperm(arg1,arg2,arg3); - -THIntTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_IntTensor_sort(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THIntTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg2_idx = 1; -arg1 = THIntTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isboolean(L, 2) -) -{ -arg5 = lua_toboolean(L, 2); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg5 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg5 = lua_toboolean(L, 3); -arg1 = THIntTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& 
lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = lua_toboolean(L, 4); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg5 = lua_toboolean(L, 3); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg1 = THIntTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [*LongTensor*] IntTensor [index] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THIntTensor_sort(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_IntTensor_topk(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THIntTensor *arg3 = NULL; -long arg4 = 1; -long arg5 = 0; -int arg6 = 0; -int arg7 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg2_idx = 1; -arg1 = THIntTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 
3); -arg1 = THIntTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THIntTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& 
(arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THIntTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THIntTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg1 = THIntTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, 
"torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isboolean(L, 2) -) -{ -arg7 = lua_toboolean(L, 2); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg7 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg7 = lua_toboolean(L, 3); -arg1 = THIntTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg7 = lua_toboolean(L, 4); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THIntTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg7 = lua_toboolean(L, 3); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THIntTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, 
"torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THIntTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THIntTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& 
(arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THIntTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THIntTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -} -else if(narg == 5 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg1 = THIntTensor_new(); -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -&& lua_isboolean(L, 7) -) -{ -arg1_idx = 1; 
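/* The topk ladder is the largest in this file because the generator emits
 * one branch per combination of optional arguments -- the two destination
 * tensors, k, dim, and the `largest` (arg6) and `sorted` (arg7) booleans --
 * so the case count grows multiplicatively. Only k keeps its Lua value;
 * dim is shifted to 0-based as elsewhere. Sketch of the common forms
 * (assuming standard torch7 conventions, where the default is the k
 * smallest values):
 *
 *   vals, idx = t:topk(k)                  -- k smallest along last dim
 *   vals, idx = t:topk(k, dim, true, true) -- k largest, sorted
 */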
-arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -arg7 = lua_toboolean(L, 7); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [*LongTensor*] IntTensor [long] [index] [boolean] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THIntTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_IntTensor_kthvalue(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THIntTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THIntTensor_new(); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [*LongTensor*] IntTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); 
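/* kthvalue keeps k (arg4) 1-based -- only the dimension argument is
 * decremented -- and, like mode and median below, returns both the values
 * and a LongTensor of positions made 1-based by the trailing
 * THLongTensor_add(..., 1). Sketch (assuming standard torch7 conventions):
 *
 *   val, idx = t:kthvalue(3)      -- 3rd smallest along the last dimension
 *   val, idx = t:kthvalue(3, 1)   -- 3rd smallest along dimension 1
 */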
-THIntTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_IntTensor_mode(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THIntTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg2_idx = 1; -arg1 = THIntTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [*LongTensor*] IntTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THIntTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_IntTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THIntTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg2_idx = 1; -arg1 = THIntTensor_new(); -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = 
luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THIntTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THIntTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] [*LongTensor*] IntTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THIntTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_IntTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_tril(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_triu(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor [int]", type_buf); -} 
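/* For tril/triu the trailing number is a diagonal offset rather than an
 * index, so it passes through unshifted (no `-1` conversion). Sketch
 * (assuming standard torch7 conventions):
 *
 *   l = t:tril()      -- keep the main diagonal and below
 *   l = t:tril(-1)    -- strictly below the main diagonal
 *   u = t:triu(1)     -- strictly above the main diagonal
 */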
-if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_triu(arg1,arg2,arg3); -return 1; -} - -static int m_torch_IntTensor_cat(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -long arg4 = -2; -THIntTensor *arg5 = NULL; -int arg5_idx = 0; -THIntTensor **arg6_data = NULL; -long arg6_size = 0; -int arg6_i = 0; -long arg7 = -2; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THIntTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.IntTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 1 -&& torch_isnonemptytable(L, 1) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THIntTensor**)THAlloc(arg6_size * sizeof(THIntTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.IntTensor"))) - luaL_error(L, "expected IntTensor in tensor array"); - lua_pop(L, 1); -} - -arg5 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THIntTensor**)THAlloc(arg6_size * sizeof(THIntTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.IntTensor"))) - luaL_error(L, "expected IntTensor in tensor array"); - lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THIntTensor**)THAlloc(arg6_size * sizeof(THIntTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.IntTensor"))) - luaL_error(L, "expected IntTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.IntTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THIntTensor**)THAlloc(arg6_size * sizeof(THIntTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.IntTensor"))) - luaL_error(L, "expected IntTensor in tensor array"); - 
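/* The table form of cat above marshals a Lua array into C: lua_rawgeti
 * probes successive indices until nil to count the tensors, a THIntTensor*
 * array is THAlloc'd and filled by popping the probed values back off the
 * stack, and THFree releases it once THIntTensor_catArray has run. Sketch
 * (assuming standard torch7 conventions; a, b, c are IntTensors):
 *
 *   r = a:cat(b, 1)                         -- two tensors along dim 1
 *   r = torch.IntTensor.cat({a, b, c}, 2)   -- a whole table of tensors
 */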
lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor IntTensor [index] | [*IntTensor*] {IntTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.IntTensor"); -THIntTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THIntTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THIntTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, 
"_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *IntTensor* [Generator] long long | *IntTensor* [Generator] long | *IntTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THIntTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THIntTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *IntTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THIntTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = 
luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 2; -arg4_idx = 1; -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *IntTensor* [Generator] [double] | *IntTensor* [Generator] FloatTensor | *IntTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THIntTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -THIntTensor *arg3 = NULL; -int arg3_idx = 0; -THIntTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THIntTensor_new(); -} -else if(narg == 3 -&& (arg3 = 
luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor | [*IntTensor*] IntTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THIntTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.IntTensor"); -{int hasdims = arg4->nDimension > 1; -THIntTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THIntTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int m_torch_IntTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THIntTensor_sign(arg1,arg2); -return 1; -} - -static int m_torch_IntTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -int arg6 = 1; -int arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -int arg11 = 0; -int arg12 = 1; -THIntTensor *arg13 = NULL; -THIntTensor *arg14 = NULL; -int arg15 = 1; -int arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THIntTensor *arg19 = NULL; -int arg19_idx = 0; -int arg20 = 0; -int arg21 = 1; -THIntTensor *arg22 = NULL; -THIntTensor *arg23 = NULL; -int arg24 = 1; -int arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, 
"torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THIntTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THIntTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THIntTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THIntTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~2D IntTensor~2D [(V|F)] | [*IntTensor*] IntTensor~3D IntTensor~3D [(V|F)] | [*IntTensor*] IntTensor~3D IntTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.IntTensor"); -THIntTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ 
-if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.IntTensor"); -THIntTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -int arg6 = 1; -int arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -int arg11 = 0; -int arg12 = 1; -THIntTensor *arg13 = NULL; -THIntTensor *arg14 = NULL; -int arg15 = 1; -int arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THIntTensor *arg19 = NULL; -int arg19_idx = 0; -int arg20 = 0; -int arg21 = 1; -THIntTensor *arg22 = NULL; -THIntTensor *arg23 = NULL; -int arg24 = 1; -int arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THIntTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THIntTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 
== 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THIntTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THIntTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~2D IntTensor~2D [(V|F)] | [*IntTensor*] IntTensor~3D IntTensor~3D [(V|F)] | [*IntTensor*] IntTensor~3D IntTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.IntTensor"); -THIntTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.IntTensor"); -THIntTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -int arg6 = 1; -int arg7 = 1; -int arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THIntTensor *arg11 = NULL; -int arg11_idx = 0; -int arg12 = 0; -int arg13 = 1; -THIntTensor *arg14 = NULL; -THIntTensor *arg15 = NULL; -int arg16 = 1; -int arg17 = 1; -int arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THIntTensor *arg21 = NULL; -int arg21_idx = 0; -int arg22 = 0; -int arg23 = 1; -THIntTensor *arg24 = NULL; -THIntTensor *arg25 = NULL; -int arg26 = 1; -int arg27 = 1; -int arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& 
(arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THIntTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THIntTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THIntTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THIntTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; 
-str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~3D IntTensor~3D [(V|F)] | [*IntTensor*] IntTensor~4D IntTensor~4D [(V|F)] | [*IntTensor*] IntTensor~4D IntTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.IntTensor"); -THIntTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.IntTensor"); -THIntTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -int arg2 = 0; -int arg3 = 1; -THIntTensor *arg4 = NULL; -THIntTensor *arg5 = NULL; -int arg6 = 1; -int arg7 = 1; -int arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THIntTensor *arg11 = NULL; -int arg11_idx = 0; -int arg12 = 0; -int arg13 = 1; -THIntTensor *arg14 = NULL; -THIntTensor *arg15 = NULL; -int arg16 = 1; -int arg17 = 1; -int arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THIntTensor *arg21 = NULL; -int arg21_idx = 0; -int arg22 = 0; -int arg23 = 1; -THIntTensor *arg24 = NULL; -THIntTensor *arg25 = NULL; -int arg26 = 1; -int arg27 = 1; -int arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THIntTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THIntTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension 
== 4) -&& (arg15 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THIntTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THIntTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THIntTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.IntTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.IntTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*IntTensor*] IntTensor~3D IntTensor~3D [(V|F)] | [*IntTensor*] IntTensor~4D IntTensor~4D [(V|F)] | [*IntTensor*] IntTensor~4D IntTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.IntTensor"); -THIntTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.IntTensor"); -THIntTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.IntTensor"); -THIntTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor 
*arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = 
luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int 
m_torch_IntTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = 
(int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -THIntTensor *arg4 = NULL; -int arg4_idx = 0; -THIntTensor *arg5 = NULL; -int arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THIntTensor *arg8 = NULL; -THIntTensor *arg9 = NULL; -THIntTensor *arg10 = NULL; -int arg10_idx = 0; -THIntTensor *arg11 = NULL; -THIntTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (int)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.IntTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (int)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.IntTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] IntTensor int | *IntTensor* IntTensor int | [*ByteTensor*] IntTensor IntTensor | *IntTensor* IntTensor IntTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THIntTensor_neValue(arg1,arg2,arg3); 
-return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THIntTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THIntTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THIntTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_IntTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] IntTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THIntTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_IntTensor_abs(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THIntTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg3 = 0; -int arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.IntTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (int)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *IntTensor* [IntTensor] | int", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THIntTensor_abs(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = abs(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static const struct luaL_Reg m_torch_IntTensorMath__ [] = { -{"zero", m_torch_IntTensor_zero}, -{"fill", m_torch_IntTensor_fill}, -{"zeros", m_torch_IntTensor_zeros}, -{"ones", m_torch_IntTensor_ones}, -{"reshape", m_torch_IntTensor_reshape}, -{"gather", m_torch_IntTensor_gather}, -{"scatter", m_torch_IntTensor_scatter}, -{"dot", m_torch_IntTensor_dot}, -{"equal", m_torch_IntTensor_equal}, -{"add", m_torch_IntTensor_add}, -{"csub", m_torch_IntTensor_csub}, -{"mul", m_torch_IntTensor_mul}, -{"div", m_torch_IntTensor_div}, -{"lshift", m_torch_IntTensor_lshift}, -{"rshift", m_torch_IntTensor_rshift}, -{"fmod", m_torch_IntTensor_fmod}, -{"remainder", m_torch_IntTensor_remainder}, -{"bitand", m_torch_IntTensor_bitand}, -{"bitor", m_torch_IntTensor_bitor}, -{"bitxor", m_torch_IntTensor_bitxor}, -{"mod", m_torch_IntTensor_mod}, -{"clamp", m_torch_IntTensor_clamp}, -{"match", m_torch_IntTensor_match}, -{"cmul", m_torch_IntTensor_cmul}, -{"cpow", m_torch_IntTensor_cpow}, -{"cdiv", m_torch_IntTensor_cdiv}, -{"clshift", m_torch_IntTensor_clshift}, -{"crshift", m_torch_IntTensor_crshift}, -{"cfmod", m_torch_IntTensor_cfmod}, -{"cremainder", m_torch_IntTensor_cremainder}, -{"cbitand", m_torch_IntTensor_cbitand}, -{"cbitor", m_torch_IntTensor_cbitor}, -{"cbitxor", m_torch_IntTensor_cbitxor}, -{"cmod", m_torch_IntTensor_cmod}, -{"addcmul", m_torch_IntTensor_addcmul}, -{"addcdiv", 
m_torch_IntTensor_addcdiv}, -{"mv", m_torch_IntTensor_mv}, -{"mm", m_torch_IntTensor_mm}, -{"bmm", m_torch_IntTensor_bmm}, -{"ger", m_torch_IntTensor_ger}, -{"addmv", m_torch_IntTensor_addmv}, -{"addmm", m_torch_IntTensor_addmm}, -{"addr", m_torch_IntTensor_addr}, -{"addbmm", m_torch_IntTensor_addbmm}, -{"baddbmm", m_torch_IntTensor_baddbmm}, -{"numel", m_torch_IntTensor_numel}, -{"cumsum", m_torch_IntTensor_cumsum}, -{"cumprod", m_torch_IntTensor_cumprod}, -{"sum", m_torch_IntTensor_sum}, -{"prod", m_torch_IntTensor_prod}, -{"min", m_torch_IntTensor_min}, -{"max", m_torch_IntTensor_max}, -{"cmin", m_torch_IntTensor_cmin}, -{"cmax", m_torch_IntTensor_cmax}, -{"trace", m_torch_IntTensor_trace}, -{"cross", m_torch_IntTensor_cross}, -{"diag", m_torch_IntTensor_diag}, -{"eye", m_torch_IntTensor_eye}, -{"range", m_torch_IntTensor_range}, -{"randperm", m_torch_IntTensor_randperm}, -{"sort", m_torch_IntTensor_sort}, -{"topk", m_torch_IntTensor_topk}, -{"kthvalue", m_torch_IntTensor_kthvalue}, -{"mode", m_torch_IntTensor_mode}, -{"median", m_torch_IntTensor_median}, -{"tril", m_torch_IntTensor_tril}, -{"triu", m_torch_IntTensor_triu}, -{"cat", m_torch_IntTensor_cat}, -{"random", m_torch_IntTensor_random}, -{"geometric", m_torch_IntTensor_geometric}, -{"bernoulli", m_torch_IntTensor_bernoulli}, -{"squeeze", m_torch_IntTensor_squeeze}, -{"sign", m_torch_IntTensor_sign}, -{"conv2", m_torch_IntTensor_conv2}, -{"xcorr2", m_torch_IntTensor_xcorr2}, -{"conv3", m_torch_IntTensor_conv3}, -{"xcorr3", m_torch_IntTensor_xcorr3}, -{"lt", m_torch_IntTensor_lt}, -{"gt", m_torch_IntTensor_gt}, -{"le", m_torch_IntTensor_le}, -{"ge", m_torch_IntTensor_ge}, -{"eq", m_torch_IntTensor_eq}, -{"ne", m_torch_IntTensor_ne}, -{"nonzero", m_torch_IntTensor_nonzero}, -{"abs", m_torch_IntTensor_abs}, -{NULL, NULL} -}; - -static const struct luaL_Reg torch_IntTensorMath__ [] = { -{"zero", torch_IntTensor_zero}, -{"fill", torch_IntTensor_fill}, -{"zeros", torch_IntTensor_zeros}, -{"ones", torch_IntTensor_ones}, -{"reshape", torch_IntTensor_reshape}, -{"gather", torch_IntTensor_gather}, -{"scatter", torch_IntTensor_scatter}, -{"dot", torch_IntTensor_dot}, -{"equal", torch_IntTensor_equal}, -{"add", torch_IntTensor_add}, -{"csub", torch_IntTensor_csub}, -{"mul", torch_IntTensor_mul}, -{"div", torch_IntTensor_div}, -{"lshift", torch_IntTensor_lshift}, -{"rshift", torch_IntTensor_rshift}, -{"fmod", torch_IntTensor_fmod}, -{"remainder", torch_IntTensor_remainder}, -{"bitand", torch_IntTensor_bitand}, -{"bitor", torch_IntTensor_bitor}, -{"bitxor", torch_IntTensor_bitxor}, -{"mod", torch_IntTensor_mod}, -{"clamp", torch_IntTensor_clamp}, -{"match", torch_IntTensor_match}, -{"cmul", torch_IntTensor_cmul}, -{"cpow", torch_IntTensor_cpow}, -{"cdiv", torch_IntTensor_cdiv}, -{"clshift", torch_IntTensor_clshift}, -{"crshift", torch_IntTensor_crshift}, -{"cfmod", torch_IntTensor_cfmod}, -{"cremainder", torch_IntTensor_cremainder}, -{"cbitand", torch_IntTensor_cbitand}, -{"cbitor", torch_IntTensor_cbitor}, -{"cbitxor", torch_IntTensor_cbitxor}, -{"cmod", torch_IntTensor_cmod}, -{"addcmul", torch_IntTensor_addcmul}, -{"addcdiv", torch_IntTensor_addcdiv}, -{"mv", torch_IntTensor_mv}, -{"mm", torch_IntTensor_mm}, -{"bmm", torch_IntTensor_bmm}, -{"ger", torch_IntTensor_ger}, -{"addmv", torch_IntTensor_addmv}, -{"addmm", torch_IntTensor_addmm}, -{"addr", torch_IntTensor_addr}, -{"addbmm", torch_IntTensor_addbmm}, -{"baddbmm", torch_IntTensor_baddbmm}, -{"numel", torch_IntTensor_numel}, -{"cumsum", torch_IntTensor_cumsum}, -{"cumprod", 
torch_IntTensor_cumprod}, -{"sum", torch_IntTensor_sum}, -{"prod", torch_IntTensor_prod}, -{"min", torch_IntTensor_min}, -{"max", torch_IntTensor_max}, -{"cmin", torch_IntTensor_cmin}, -{"cmax", torch_IntTensor_cmax}, -{"trace", torch_IntTensor_trace}, -{"cross", torch_IntTensor_cross}, -{"diag", torch_IntTensor_diag}, -{"eye", torch_IntTensor_eye}, -{"range", torch_IntTensor_range}, -{"randperm", torch_IntTensor_randperm}, -{"sort", torch_IntTensor_sort}, -{"topk", torch_IntTensor_topk}, -{"kthvalue", torch_IntTensor_kthvalue}, -{"mode", torch_IntTensor_mode}, -{"median", torch_IntTensor_median}, -{"tril", torch_IntTensor_tril}, -{"triu", torch_IntTensor_triu}, -{"cat", torch_IntTensor_cat}, -{"random", torch_IntTensor_random}, -{"geometric", torch_IntTensor_geometric}, -{"bernoulli", torch_IntTensor_bernoulli}, -{"squeeze", torch_IntTensor_squeeze}, -{"sign", torch_IntTensor_sign}, -{"conv2", torch_IntTensor_conv2}, -{"xcorr2", torch_IntTensor_xcorr2}, -{"conv3", torch_IntTensor_conv3}, -{"xcorr3", torch_IntTensor_xcorr3}, -{"lt", torch_IntTensor_lt}, -{"gt", torch_IntTensor_gt}, -{"le", torch_IntTensor_le}, -{"ge", torch_IntTensor_ge}, -{"eq", torch_IntTensor_eq}, -{"ne", torch_IntTensor_ne}, -{"nonzero", torch_IntTensor_nonzero}, -{"abs", torch_IntTensor_abs}, -{NULL, NULL} -}; - -static void torch_IntTensorMath_init(lua_State *L) -{ - luaT_pushmetatable(L, "torch.IntTensor"); - - /* register methods */ - luaT_setfuncs(L, m_torch_IntTensorMath__, 0); - - /* register functions into the "torch" field of the tensor metaclass */ - lua_pushstring(L, "torch"); - lua_newtable(L); - luaT_setfuncs(L, torch_IntTensorMath__, 0); - lua_rawset(L, -3); - lua_pop(L, 1); -} - -static int torch_LongTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_zero(arg1); -return 1; -} - -static int torch_LongTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_fill(arg1,arg2); -return 1; -} - -static int torch_LongTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THLongTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_LongTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 
0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THLongTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_LongTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int torch_LongTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THLongTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_LongTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -long arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 
2)-1; -arg8 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* index LongTensor LongTensor | *LongTensor* index LongTensor long", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THLongTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THLongTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_LongTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor LongTensor", type_buf); -} -arg3 = THLongTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int torch_LongTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -THLongTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor LongTensor", type_buf); -} -arg3 = THLongTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int torch_LongTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 1; -THLongTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 2; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -arg4 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long | [*LongTensor*] LongTensor [long] LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) 
-lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THLongTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_LongTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 1; -THLongTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 2; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -arg4 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long | [*LongTensor*] LongTensor [long] LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THLongTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_LongTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& 
(arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_div(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ 
-char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor 
long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -long arg4 = 0; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor long long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_LongTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -long arg4 = 1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_LongTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = 
NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, 
"torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = 
luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [long] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_LongTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg4 = 
luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [long] LongTensor LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_LongTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THLongTensor_new(); -THLongTensor_resize1d(arg1, arg5->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor~2D LongTensor~1D", type_buf); -} -THLongTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_LongTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THLongTensor_new(); -THLongTensor_resize2d(arg1, arg5->size[0], arg6->size[1]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor~2D LongTensor~2D", type_buf); -} -THLongTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_LongTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor 
*arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THLongTensor_new(); -THLongTensor_resize3d(arg1, arg5->size[0], arg5->size[1], arg6->size[2]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor~3D LongTensor~3D", type_buf); -} -THLongTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_LongTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THLongTensor_new(); -THLongTensor_resize2d(arg1, arg5->size[0], arg6->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor~1D LongTensor~1D", type_buf); -} -THLongTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_LongTensor_addmv(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 
4, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 6, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [long] LongTensor~1D [long] LongTensor~2D LongTensor~1D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_LongTensor_addmm(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = 
luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 6, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [long] LongTensor~2D [long] LongTensor~2D LongTensor~2D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_LongTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg3->nDimension == 2) 
-&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 6, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [long] LongTensor~2D [long] LongTensor~1D LongTensor~1D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_LongTensor_addbmm(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) && 
(arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 6, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [long] LongTensor~2D [long] LongTensor~3D LongTensor~3D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_LongTensor_baddbmm(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, 
"torch.LongTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 6, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [long] LongTensor~3D [long] LongTensor~3D LongTensor~3D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_LongTensor_numel(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -ptrdiff_t arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor", type_buf); -} -arg2 = THLongTensor_numel(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int torch_LongTensor_cumsum(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid 
arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cumsum(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_cumprod(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cumprod(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_sum(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -long arg2 = 0; -THLongTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor | [*LongTensor*] LongTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THLongTensor_sumall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.LongTensor"); -THLongTensor_sum(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_LongTensor_prod(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -long arg2 = 0; -THLongTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor | [*LongTensor*] LongTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THLongTensor_prodall(arg1); -lua_pushnumber(L, 
(lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.LongTensor");
-THLongTensor_prod(arg3,arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_LongTensor_min(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THLongTensor *arg1 = NULL;
-long arg2 = 0;
-THLongTensor *arg3 = NULL;
-int arg3_idx = 0;
-THLongTensor *arg4 = NULL;
-int arg4_idx = 0;
-THLongTensor *arg5 = NULL;
-long arg6 = 0;
-int arg7 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (long)lua_tonumber(L, 2)-1;
-arg3 = THLongTensor_new();
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg3 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg4_idx = 2;
-arg6 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor | [*LongTensor*] [*LongTensor*] LongTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THLongTensor_minall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.LongTensor");
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.LongTensor");
-THLongTensor_min(arg3,arg4,arg5,arg6,arg7);
-THLongTensor_add(arg4, arg4, 1);
-return 2;
-}
-return 0;
-}
-
-static int torch_LongTensor_max(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THLongTensor *arg1 = NULL;
-long arg2 = 0;
-THLongTensor *arg3 = NULL;
-int arg3_idx = 0;
-THLongTensor *arg4 = NULL;
-int arg4_idx = 0;
-THLongTensor *arg5 = NULL;
-long arg6 = 0;
-int arg7 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (long)lua_tonumber(L, 2)-1;
-arg3 = THLongTensor_new();
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg3 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg4_idx = 2;
-arg6 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor | [*LongTensor*] [*LongTensor*] LongTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THLongTensor_maxall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.LongTensor");
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.LongTensor");
-THLongTensor_max(arg3,arg4,arg5,arg6,arg7);
-THLongTensor_add(arg4, arg4, 1);
-return 2;
-}
-return 0;
-}
-
-static int torch_LongTensor_cmin(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THLongTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-THLongTensor *arg3 = NULL;
-THLongTensor *arg4 = NULL;
-int arg4_idx = 0;
-THLongTensor *arg5 = NULL;
-long arg6 = 0;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor"))
-)
-{
-argset = 1;
-arg1 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (long)lua_tonumber(L, 2);
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor | [*LongTensor*] LongTensor long", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.LongTensor");
-THLongTensor_cmin(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.LongTensor");
-THLongTensor_cminValue(arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_LongTensor_cmax(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THLongTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-THLongTensor *arg3 = NULL;
-THLongTensor *arg4 = NULL;
-int arg4_idx = 0;
-THLongTensor *arg5 = NULL;
-long arg6 = 0;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor"))
-)
-{
-argset = 1;
-arg1 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (long)lua_tonumber(L, 2);
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor | [*LongTensor*] LongTensor long", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.LongTensor");
-THLongTensor_cmax(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.LongTensor");
-THLongTensor_cmaxValue(arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_LongTensor_trace(lua_State *L)
-{
-int narg = lua_gettop(L);
-THLongTensor *arg1 = NULL;
-long arg2 = 0;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-)
-{
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor", type_buf);
-}
-arg2 = THLongTensor_trace(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-
-static int torch_LongTensor_cross(lua_State *L)
-{
-int narg = lua_gettop(L);
-THLongTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-THLongTensor *arg3 = NULL;
-long arg4 = -1;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor"))
-)
-{
-arg1 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.LongTensor");
-THLongTensor_cross(arg1,arg2,arg3,arg4);
-return 1;
-}
-
-static int torch_LongTensor_diag(lua_State *L)
-{
-int narg = lua_gettop(L);
-THLongTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-)
-{
-arg1 = THLongTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [long]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.LongTensor");
-THLongTensor_diag(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_LongTensor_eye(lua_State *L)
-{
-int narg = lua_gettop(L);
-THLongTensor *arg1 = NULL;
-int arg1_idx = 0;
-long arg2 = 0;
-long arg3 = 0;
-if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-arg2 = (long)lua_tonumber(L, 1);
-arg1 = THLongTensor_new();
-}
-else
if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] long [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_eye(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_range(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -long arg4 = 1; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] long long [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_range(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_LongTensor_randperm(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -arg3 = (long)lua_tonumber(L, 1); -arg1 = THLongTensor_new(); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [Generator] long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_randperm(arg1,arg2,arg3); - -THLongTensor_add(arg1, arg1, 1); 
-return 1; -} - -static int torch_LongTensor_sort(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THLongTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg2_idx = 1; -arg1 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isboolean(L, 2) -) -{ -arg5 = lua_toboolean(L, 2); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg5 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg5 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = lua_toboolean(L, 4); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg5 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 
1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [*LongTensor*] LongTensor [index] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THLongTensor_sort(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_LongTensor_topk(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THLongTensor *arg3 = NULL; -long arg4 = 1; -long arg5 = 0; -int arg6 = 0; -int arg7 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg2_idx = 1; -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg1 = 
THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = 
lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg1 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isboolean(L, 2) -) -{ -arg7 = lua_toboolean(L, 2); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 
3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg7 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg7 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg7 = lua_toboolean(L, 4); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg7 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); 
-arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, 
"torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -} -else if(narg == 5 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg1 = THLongTensor_new(); -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -&& lua_isboolean(L, 7) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -arg7 = lua_toboolean(L, 7); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [*LongTensor*] LongTensor [long] [index] [boolean] [boolean]", type_buf); -} -if(arg1_idx) 
-lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THLongTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_LongTensor_kthvalue(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THLongTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [*LongTensor*] LongTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THLongTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_LongTensor_mode(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THLongTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, 
"torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg2_idx = 1; -arg1 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [*LongTensor*] LongTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THLongTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_LongTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THLongTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg2_idx = 1; -arg1 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = 
luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [*LongTensor*] LongTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THLongTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_LongTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_tril(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_triu(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_triu(arg1,arg2,arg3); -return 1; -} - -static int torch_LongTensor_cat(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = 
NULL;
-THLongTensor *arg3 = NULL;
-long arg4 = -2;
-THLongTensor *arg5 = NULL;
-int arg5_idx = 0;
-THLongTensor **arg6_data = NULL;
-long arg6_size = 0;
-int arg6_i = 0;
-long arg7 = -2;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor"))
-)
-{
-argset = 1;
-arg1 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else if(narg == 1
-&& torch_isnonemptytable(L, 1)
-)
-{
-argset = 2;
-do
-{
- arg6_size++;
- lua_checkstack(L, 1);
- lua_rawgeti(L, 1, arg6_size);
-}
-while (!lua_isnil(L, -1));
-arg6_size--;
-lua_pop(L, 1);
-arg6_data = (THLongTensor**)THAlloc(arg6_size * sizeof(THLongTensor*));
-for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--)
-{
- if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.LongTensor")))
- luaL_error(L, "expected LongTensor in tensor array");
- lua_pop(L, 1);
-}
-
-arg5 = THLongTensor_new();
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& torch_isnonemptytable(L, 2)
-)
-{
-argset = 2;
-arg5_idx = 1;
-do
-{
- arg6_size++;
- lua_checkstack(L, 1);
- lua_rawgeti(L, 2, arg6_size);
-}
-while (!lua_isnil(L, -1));
-arg6_size--;
-lua_pop(L, 1);
-arg6_data = (THLongTensor**)THAlloc(arg6_size * sizeof(THLongTensor*));
-for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--)
-{
- if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.LongTensor")))
- luaL_error(L, "expected LongTensor in tensor array");
- lua_pop(L, 1);
-}
-
-}
-else if(narg == 2
-&& torch_isnonemptytable(L, 1)
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-do
-{
- arg6_size++;
- lua_checkstack(L, 1);
- lua_rawgeti(L, 1, arg6_size);
-}
-while (!lua_isnil(L, -1));
-arg6_size--;
-lua_pop(L, 1);
-arg6_data = (THLongTensor**)THAlloc(arg6_size * sizeof(THLongTensor*));
-for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--)
-{
- if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.LongTensor")))
- luaL_error(L, "expected LongTensor in tensor array");
- lua_pop(L, 1);
-}
-
-arg7 = (long)lua_tonumber(L, 2)-1;
-arg5 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& torch_isnonemptytable(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg5_idx = 1;
-do
-{
- arg6_size++;
- lua_checkstack(L, 1);
- lua_rawgeti(L, 2, arg6_size);
-}
-while (!lua_isnil(L, -1));
-arg6_size--;
-lua_pop(L, 1);
-arg6_data = (THLongTensor**)THAlloc(arg6_size * sizeof(THLongTensor*));
-for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--)
-{
- if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.LongTensor")))
- luaL_error(L, "expected LongTensor in tensor array");
- lua_pop(L, 1);
-}
-
-arg7 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor [index] | [*LongTensor*] {LongTensor+} [index]", type_buf);
-} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.LongTensor"); -THLongTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static void THLongTensor_random2__(THLongTensor *self, THGenerator *gen, long a, long b) -{ - THArgCheck(b >= a, 2, "upper bound must be larger than lower bound"); - TH_TENSOR_APPLY(long, self, *self_data = ((THRandom_random(gen) % (b+1-a)) + a);) -} - -static void THLongTensor_random1__(THLongTensor *self, THGenerator *gen, long b) -{ - THArgCheck(b > 0, 1, "upper bound must be strictly positive"); - TH_TENSOR_APPLY(long, self, *self_data = (THRandom_random(gen) % b + 1);) -} - -static int torch_LongTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THLongTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THLongTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx 
= 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *LongTensor* [Generator] long long | *LongTensor* [Generator] long | *LongTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THLongTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THLongTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int torch_LongTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *LongTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_LongTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THLongTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else 
if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *LongTensor* [Generator] [double] | *LongTensor* [Generator] FloatTensor | *LongTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THLongTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_LongTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 1; 
-arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor | [*LongTensor*] LongTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THLongTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.LongTensor"); -{int hasdims = arg4->nDimension > 1; -THLongTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THLongTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int torch_LongTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_sign(arg1,arg2); -return 1; -} - -static int torch_LongTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -long arg6 = 1; -long arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -long arg11 = 0; -long arg12 = 1; -THLongTensor *arg13 = NULL; -THLongTensor *arg14 = NULL; -long arg15 = 1; -long arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THLongTensor *arg19 = NULL; -int arg19_idx = 0; -long arg20 = 0; -long arg21 = 1; -THLongTensor *arg22 = NULL; -THLongTensor *arg23 = NULL; -long arg24 = 1; -long arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension 
== 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THLongTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THLongTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THLongTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THLongTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor~2D LongTensor~2D [(V|F)] | [*LongTensor*] LongTensor~3D LongTensor~3D [(V|F)] | [*LongTensor*] LongTensor~3D LongTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, 
arg1, "torch.LongTensor"); -THLongTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.LongTensor"); -THLongTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.LongTensor"); -THLongTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_LongTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -long arg6 = 1; -long arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -long arg11 = 0; -long arg12 = 1; -THLongTensor *arg13 = NULL; -THLongTensor *arg14 = NULL; -long arg15 = 1; -long arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THLongTensor *arg19 = NULL; -int arg19_idx = 0; -long arg20 = 0; -long arg21 = 1; -THLongTensor *arg22 = NULL; -THLongTensor *arg23 = NULL; -long arg24 = 1; -long arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THLongTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 3) -&& (arg17 = 
lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THLongTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THLongTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THLongTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor~2D LongTensor~2D [(V|F)] | [*LongTensor*] LongTensor~3D LongTensor~3D [(V|F)] | [*LongTensor*] LongTensor~3D LongTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.LongTensor"); -THLongTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.LongTensor"); -THLongTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_LongTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -long arg6 = 1; -long arg7 = 1; -long arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THLongTensor *arg11 = NULL; -int arg11_idx = 0; -long arg12 = 0; -long arg13 = 1; -THLongTensor *arg14 = NULL; -THLongTensor *arg15 = NULL; -long arg16 = 1; -long arg17 = 1; -long arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THLongTensor *arg21 = NULL; -int arg21_idx = 0; -long arg22 = 0; -long arg23 = 1; -THLongTensor *arg24 = NULL; -THLongTensor 
*arg25 = NULL; -long arg26 = 1; -long arg27 = 1; -long arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THLongTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THLongTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THLongTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && 
(*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THLongTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor~3D LongTensor~3D [(V|F)] | [*LongTensor*] LongTensor~4D LongTensor~4D [(V|F)] | [*LongTensor*] LongTensor~4D LongTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.LongTensor"); -THLongTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.LongTensor"); -THLongTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_LongTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -long arg6 = 1; -long arg7 = 1; -long arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THLongTensor *arg11 = NULL; -int arg11_idx = 0; -long arg12 = 0; -long arg13 = 1; -THLongTensor *arg14 = NULL; -THLongTensor *arg15 = NULL; -long arg16 = 1; -long arg17 = 1; -long arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THLongTensor *arg21 = NULL; -int arg21_idx = 0; -long arg22 = 0; -long arg23 = 1; -THLongTensor *arg24 = NULL; -THLongTensor *arg25 = NULL; -long arg26 = 1; -long arg27 = 1; -long arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 
3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THLongTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THLongTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THLongTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THLongTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor~3D LongTensor~3D [(V|F)] | [*LongTensor*] LongTensor~4D LongTensor~4D [(V|F)] | [*LongTensor*] LongTensor~4D LongTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.LongTensor"); -THLongTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, 
"torch.LongTensor"); -THLongTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_LongTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_LongTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 
3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_LongTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor 
long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_LongTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_LongTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; 
-THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_LongTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, 
"torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_LongTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int torch_LongTensor_abs(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -long arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (long)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor | long", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_abs(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = labs(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_zero(arg1); -return 1; -} - -static int m_torch_LongTensor_fill(lua_State *L) -{ -int narg = 
lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_fill(arg1,arg2); -return 1; -} - -static int m_torch_LongTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_LongTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_LongTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int m_torch_LongTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THLongTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor index 
LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_LongTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -long arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* index LongTensor LongTensor | *LongTensor* index LongTensor long", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THLongTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THLongTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor LongTensor", type_buf); -} -arg3 = THLongTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int m_torch_LongTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -THLongTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor LongTensor", type_buf); -} -arg3 = THLongTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int m_torch_LongTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 1; -THLongTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& 
(arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long | *LongTensor* [LongTensor] [long] LongTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THLongTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 1; -THLongTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long | *LongTensor* [LongTensor] [long] LongTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THLongTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = 
(long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_div(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int 
arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 
= (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -long arg4 = 0; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3); -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] long long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_LongTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -long arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* LongTensor LongTensor [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_LongTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid 
arguments: %s\nexpected arguments: *LongTensor* [LongTensor] LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; 
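/* Editor's note: every m_torch_LongTensor_* wrapper in this deleted file
 * follows the same machine-generated dispatch shape (the file appears to
 * be produced by torch7's cwrap generator): read the stack depth with
 * lua_gettop(), probe each legal overload with luaT_toudata() and
 * lua_isnumber() checks, and when the destination tensor is omitted reuse
 * the source as the destination (arg2 = arg1), which yields the in-place
 * form. If no overload matches, str_arg_types() renders the actual
 * argument types into the luaL_error() message. Illustrative Lua-side
 * calls for this function, with hypothetical tensors a, b, c:
 *   a:cremainder(b)    -- in place: a <- elementwise remainder of a by b
 *   c:cremainder(a, b) -- out of place: result written into c
 */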
-if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; 
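/* Editor's note: cmod survives only as a backwards-compatible alias;
 * exactly as m_torch_LongTensor_mod above ends in THLongTensor_fmod,
 * the call at the end of this wrapper lands in THLongTensor_cfmod
 * rather than a dedicated "cmod" kernel.
 */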
-} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] [long] LongTensor LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_LongTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] [long] LongTensor LongTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_LongTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -long arg4 = 
1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* LongTensor~2D LongTensor~1D", type_buf); -} -THLongTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THLongTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_LongTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* LongTensor~2D LongTensor~2D", type_buf); -} -THLongTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THLongTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_LongTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* LongTensor~3D LongTensor~3D", type_buf); -} -THLongTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THLongTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_LongTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* LongTensor~1D LongTensor~1D", type_buf); -} -THLongTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THLongTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_LongTensor_addmv(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -THLongTensor *arg7 = NULL; -int arg7_idx = 0; -long arg8 = 0; -THLongTensor *arg9 = NULL; -long arg10 = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && 
(arg1->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (long)lua_tonumber(L, 2); -arg10 = (long)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg9->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.LongTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (long)lua_tonumber(L, 2); -arg10 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor~1D* [LongTensor~1D] [long] LongTensor~2D LongTensor~1D | *LongTensor~1D* long [LongTensor~1D] long LongTensor~2D LongTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THLongTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THLongTensor_addmv(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_addmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -THLongTensor *arg7 = NULL; -int arg7_idx = 0; -long arg8 = 0; -THLongTensor *arg9 = NULL; -long arg10 = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx 
= 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (long)lua_tonumber(L, 2); -arg10 = (long)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.LongTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (long)lua_tonumber(L, 2); -arg10 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor~2D* [LongTensor~2D] [long] LongTensor~2D LongTensor~2D | *LongTensor~2D* long [LongTensor~2D] long LongTensor~2D LongTensor~2D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THLongTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THLongTensor_addmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -THLongTensor *arg7 = NULL; -int arg7_idx = 0; -long arg8 = 0; -THLongTensor *arg9 = NULL; -long arg10 = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = 
luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (long)lua_tonumber(L, 2); -arg10 = (long)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 6, "torch.LongTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (long)lua_tonumber(L, 2); -arg10 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor~2D* [LongTensor~2D] [long] LongTensor~1D LongTensor~1D | *LongTensor~2D* long [LongTensor~2D] long LongTensor~1D LongTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THLongTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THLongTensor_addr(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_addbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -THLongTensor *arg7 = NULL; -int arg7_idx = 0; -long arg8 = 0; -THLongTensor *arg9 = NULL; -long arg10 = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 
= luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (long)lua_tonumber(L, 2); -arg10 = (long)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.LongTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (long)lua_tonumber(L, 2); -arg10 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor~2D* [LongTensor~2D] [long] LongTensor~3D LongTensor~3D | *LongTensor~2D* long [LongTensor~2D] long LongTensor~3D LongTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THLongTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THLongTensor_addbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_baddbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 1; -THLongTensor *arg3 = NULL; -long arg4 = 1; -THLongTensor *arg5 = NULL; -THLongTensor *arg6 = NULL; -THLongTensor *arg7 = NULL; -int arg7_idx = 0; -long arg8 = 0; -THLongTensor *arg9 = NULL; -long arg10 = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, 
"torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.LongTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (long)lua_tonumber(L, 2); -arg10 = (long)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg9->nDimension == 3) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.LongTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.LongTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (long)lua_tonumber(L, 2); -arg10 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor~3D* [LongTensor~3D] [long] LongTensor~3D LongTensor~3D | *LongTensor~3D* long [LongTensor~3D] long LongTensor~3D LongTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THLongTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THLongTensor_baddbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_numel(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -ptrdiff_t arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor", type_buf); -} -arg2 = THLongTensor_numel(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_LongTensor_cumsum(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); 
-THLongTensor_cumsum(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_cumprod(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cumprod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_sum(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -long arg2 = 0; -THLongTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor | [*LongTensor*] LongTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THLongTensor_sumall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.LongTensor"); -THLongTensor_sum(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_prod(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -long arg2 = 0; -THLongTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor | [*LongTensor*] LongTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THLongTensor_prodall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.LongTensor"); 
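/* Editor's note: sum/prod/min/max share a two-way "argset" dispatch:
 * argset 1 is the full reduction (THLongTensor_sumall, _prodall, ...)
 * returning a plain Lua number, argset 2 the dimension-wise form
 * returning a tensor; the trailing int (arg6 here, defaulting to 1) is
 * presumably the keepdim flag of the TH reduction API. For min/max
 * below, the generated code additionally runs
 * "THLongTensor_add(arg4, arg4, 1)" on the returned index tensor so the
 * 0-based TH indices come back to Lua 1-based.
 */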
-THLongTensor_prod(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_min(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -long arg2 = 0; -THLongTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THLongTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor | [*LongTensor*] [*LongTensor*] LongTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THLongTensor_minall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.LongTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THLongTensor_min(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_LongTensor_max(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -long arg2 = 0; -THLongTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THLongTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected 
arguments: LongTensor | [*LongTensor*] [*LongTensor*] LongTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THLongTensor_maxall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.LongTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THLongTensor_max(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_LongTensor_cmin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -arg4 = THLongTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [LongTensor] LongTensor | [*LongTensor*] [LongTensor] long", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cmin(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THLongTensor_cminValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_cmax(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -} -else 
if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -arg4 = THLongTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [LongTensor] LongTensor | [*LongTensor*] [LongTensor] long", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cmax(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THLongTensor_cmaxValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_trace(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -long arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: LongTensor", type_buf); -} -arg2 = THLongTensor_trace(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_LongTensor_cross(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -long arg4 = -1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cross(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_LongTensor_diag(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = 
THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_diag(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_eye(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_eye(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_range(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -long arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* long long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_range(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_LongTensor_randperm(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [Generator] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_randperm(arg1,arg2,arg3); - -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_LongTensor_sort(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int 
arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THLongTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg2_idx = 1; -arg1 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isboolean(L, 2) -) -{ -arg5 = lua_toboolean(L, 2); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg5 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg5 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = lua_toboolean(L, 4); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg5 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 
= luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [*LongTensor*] LongTensor [index] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THLongTensor_sort(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_LongTensor_topk(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THLongTensor *arg3 = NULL; -long arg4 = 1; -long arg5 = 0; -int arg6 = 0; -int arg7 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg2_idx = 1; -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& 
(arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = 
luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg1 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isboolean(L, 2) -) -{ -arg7 = lua_toboolean(L, 2); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& 
lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg7 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg7 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg7 = lua_toboolean(L, 4); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg7 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& 
(arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, 
"torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -} -else if(narg == 5 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg1 = THLongTensor_new(); -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -&& lua_isboolean(L, 7) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -arg7 = lua_toboolean(L, 7); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [*LongTensor*] LongTensor [long] [index] [boolean] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -if(arg2_idx) -lua_pushvalue(L, 
arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THLongTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_LongTensor_kthvalue(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THLongTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [*LongTensor*] LongTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THLongTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_LongTensor_mode(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THLongTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg4 = 
THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg2_idx = 1; -arg1 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [*LongTensor*] LongTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THLongTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_LongTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THLongTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg2_idx = 1; -arg1 = THLongTensor_new(); -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THLongTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THLongTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& 
lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [*LongTensor*] LongTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THLongTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_LongTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_tril(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_triu(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_triu(arg1,arg2,arg3); -return 1; -} - -static int m_torch_LongTensor_cat(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -long arg4 = -2; -THLongTensor *arg5 = NULL; -int 
arg5_idx = 0; -THLongTensor **arg6_data = NULL; -long arg6_size = 0; -int arg6_i = 0; -long arg7 = -2; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 1 -&& torch_isnonemptytable(L, 1) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THLongTensor**)THAlloc(arg6_size * sizeof(THLongTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.LongTensor"))) - luaL_error(L, "expected LongTensor in tensor array"); - lua_pop(L, 1); -} - -arg5 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THLongTensor**)THAlloc(arg6_size * sizeof(THLongTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.LongTensor"))) - luaL_error(L, "expected LongTensor in tensor array"); - lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THLongTensor**)THAlloc(arg6_size * sizeof(THLongTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.LongTensor"))) - luaL_error(L, "expected LongTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.LongTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THLongTensor**)THAlloc(arg6_size * sizeof(THLongTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.LongTensor"))) - luaL_error(L, "expected LongTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor LongTensor [index] | [*LongTensor*] {LongTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else 
-luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.LongTensor"); -THLongTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THLongTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THLongTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
[Generator] long long | [Generator] long | [Generator] | *LongTensor* [Generator] long long | *LongTensor* [Generator] long | *LongTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THLongTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THLongTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *LongTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THLongTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 2; -arg4_idx = 1; 
-lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *LongTensor* [Generator] [double] | *LongTensor* [Generator] FloatTensor | *LongTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THLongTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -THLongTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
[*LongTensor*] LongTensor | [*LongTensor*] LongTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THLongTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.LongTensor"); -{int hasdims = arg4->nDimension > 1; -THLongTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THLongTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int m_torch_LongTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THLongTensor_sign(arg1,arg2); -return 1; -} - -static int m_torch_LongTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -long arg6 = 1; -long arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -long arg11 = 0; -long arg12 = 1; -THLongTensor *arg13 = NULL; -THLongTensor *arg14 = NULL; -long arg15 = 1; -long arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THLongTensor *arg19 = NULL; -int arg19_idx = 0; -long arg20 = 0; -long arg21 = 1; -THLongTensor *arg22 = NULL; -THLongTensor *arg23 = NULL; -long arg24 = 1; -long arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& 
(arg13 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THLongTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THLongTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THLongTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THLongTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor~2D LongTensor~2D [(V|F)] | [*LongTensor*] LongTensor~3D LongTensor~3D [(V|F)] | [*LongTensor*] LongTensor~3D LongTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.LongTensor"); -THLongTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.LongTensor"); -THLongTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int 
m_torch_LongTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -long arg6 = 1; -long arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -long arg11 = 0; -long arg12 = 1; -THLongTensor *arg13 = NULL; -THLongTensor *arg14 = NULL; -long arg15 = 1; -long arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THLongTensor *arg19 = NULL; -int arg19_idx = 0; -long arg20 = 0; -long arg21 = 1; -THLongTensor *arg22 = NULL; -THLongTensor *arg23 = NULL; -long arg24 = 1; -long arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THLongTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THLongTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.LongTensor")) && 
(arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THLongTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THLongTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor~2D LongTensor~2D [(V|F)] | [*LongTensor*] LongTensor~3D LongTensor~3D [(V|F)] | [*LongTensor*] LongTensor~3D LongTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.LongTensor"); -THLongTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.LongTensor"); -THLongTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -long arg6 = 1; -long arg7 = 1; -long arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THLongTensor *arg11 = NULL; -int arg11_idx = 0; -long arg12 = 0; -long arg13 = 1; -THLongTensor *arg14 = NULL; -THLongTensor *arg15 = NULL; -long arg16 = 1; -long arg17 = 1; -long arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THLongTensor *arg21 = NULL; -int arg21_idx = 0; -long arg22 = 0; -long arg23 = 1; -THLongTensor *arg24 = NULL; -THLongTensor *arg25 = NULL; -long arg26 = 1; -long arg27 = 1; -long arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && 
(arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THLongTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THLongTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THLongTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THLongTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: 
%s\nexpected arguments: [*LongTensor*] LongTensor~3D LongTensor~3D [(V|F)] | [*LongTensor*] LongTensor~4D LongTensor~4D [(V|F)] | [*LongTensor*] LongTensor~4D LongTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.LongTensor"); -THLongTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.LongTensor"); -THLongTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 1; -THLongTensor *arg4 = NULL; -THLongTensor *arg5 = NULL; -long arg6 = 1; -long arg7 = 1; -long arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THLongTensor *arg11 = NULL; -int arg11_idx = 0; -long arg12 = 0; -long arg13 = 1; -THLongTensor *arg14 = NULL; -THLongTensor *arg15 = NULL; -long arg16 = 1; -long arg17 = 1; -long arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THLongTensor *arg21 = NULL; -int arg21_idx = 0; -long arg22 = 0; -long arg23 = 1; -THLongTensor *arg24 = NULL; -THLongTensor *arg25 = NULL; -long arg26 = 1; -long arg27 = 1; -long arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THLongTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THLongTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 4) -&& 
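/*
 * conv3/xcorr3 share one dispatch: two 3D tensors (volume, kernel), a 4D/4D
 * pair, or a 4D/5D pair, each optionally preceded by a destination tensor
 * and optionally followed by a mode string checked as (*arg == 'V' || *arg == 'F'):
 * 'V' keeps only the valid region, 'F' computes the full result. The second,
 * non-user-visible flag (arg10/arg20/arg30, defaulting to 'X' here) tells the
 * shared THLongTensor_conv3D* kernels to cross-correlate rather than convolve.
 * A rough Lua sketch (vol and kern are hypothetical 3D LongTensors):
 *   local r = torch.xcorr3(vol, kern)        -- 'V' (valid) is the default
 *   local r = torch.xcorr3(vol, kern, 'F')   -- full cross-correlation
 */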
(arg15 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THLongTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THLongTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THLongTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.LongTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.LongTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor~3D LongTensor~3D [(V|F)] | [*LongTensor*] LongTensor~4D LongTensor~4D [(V|F)] | [*LongTensor*] LongTensor~4D LongTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.LongTensor"); -THLongTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.LongTensor"); -THLongTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; 
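/*
 * This wrapper and its siblings below (gt, le, ge, eq, ne) are stamped from
 * one four-way dispatch: (tensor, number) allocates a fresh ByteTensor mask;
 * a leading ByteTensor is reused as the destination; the same two shapes
 * exist for elementwise (tensor, tensor) comparison; and a leading LongTensor
 * selects the ValueT/TensorT variants, which write a 0/1 result of the
 * operand type instead of a byte mask. Roughly, on the Lua side (x, y, mask,
 * res are hypothetical tensors):
 *   local m = x:lt(3)       -- argset 1: new ByteTensor mask
 *   torch.lt(mask, x, 3)    -- argset 1: fill a preallocated ByteTensor
 *   torch.lt(x, y)          -- argset 3: elementwise mask
 *   torch.lt(res, x, y)     -- argset 4: 0/1 LongTensor written into res
 */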
-THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, 
"torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_leTensor(arg7,arg8,arg9); -return 1; -} 
-else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); 
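/*
 * Result publishing, common to all of these wrappers: if the caller passed a
 * destination tensor, its stack index was recorded in arg*_idx and
 * lua_pushvalue() returns that same object; otherwise the result was
 * allocated with TH*Tensor_new() and ownership passes to Lua through
 * luaT_pushudata(). Either way exactly one value is returned. A minimal Lua
 * sketch of why the in-place form matters (names hypothetical):
 *   local mask = torch.ByteTensor()
 *   for _, t in ipairs(batch) do
 *     torch.eq(mask, t, 0)   -- reuses mask instead of allocating each pass
 *   end
 */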
-} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THLongTensor *arg5 = NULL; -long arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THLongTensor *arg8 = NULL; -THLongTensor *arg9 = NULL; -THLongTensor *arg10 = NULL; -int arg10_idx = 0; -THLongTensor *arg11 = NULL; -THLongTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (long)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.LongTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] LongTensor long | *LongTensor* LongTensor 
long | [*ByteTensor*] LongTensor LongTensor | *LongTensor* LongTensor LongTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THLongTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THLongTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THLongTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THLongTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_LongTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THLongTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_LongTensor_abs(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -long arg3 = 0; -long arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (long)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [LongTensor] | long", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THLongTensor_abs(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = labs(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static const struct luaL_Reg m_torch_LongTensorMath__ [] = { -{"zero", m_torch_LongTensor_zero}, -{"fill", m_torch_LongTensor_fill}, -{"zeros", m_torch_LongTensor_zeros}, -{"ones", m_torch_LongTensor_ones}, -{"reshape", m_torch_LongTensor_reshape}, -{"gather", m_torch_LongTensor_gather}, -{"scatter", m_torch_LongTensor_scatter}, -{"dot", m_torch_LongTensor_dot}, -{"equal", m_torch_LongTensor_equal}, -{"add", m_torch_LongTensor_add}, -{"csub", m_torch_LongTensor_csub}, -{"mul", m_torch_LongTensor_mul}, -{"div", m_torch_LongTensor_div}, -{"lshift", m_torch_LongTensor_lshift}, -{"rshift", m_torch_LongTensor_rshift}, -{"fmod", m_torch_LongTensor_fmod}, -{"remainder", m_torch_LongTensor_remainder}, -{"bitand", m_torch_LongTensor_bitand}, -{"bitor", m_torch_LongTensor_bitor}, -{"bitxor", m_torch_LongTensor_bitxor}, -{"mod", m_torch_LongTensor_mod}, -{"clamp", m_torch_LongTensor_clamp}, -{"match", m_torch_LongTensor_match}, -{"cmul", m_torch_LongTensor_cmul}, -{"cpow", m_torch_LongTensor_cpow}, -{"cdiv", m_torch_LongTensor_cdiv}, -{"clshift", m_torch_LongTensor_clshift}, -{"crshift", m_torch_LongTensor_crshift}, -{"cfmod", 
m_torch_LongTensor_cfmod}, -{"cremainder", m_torch_LongTensor_cremainder}, -{"cbitand", m_torch_LongTensor_cbitand}, -{"cbitor", m_torch_LongTensor_cbitor}, -{"cbitxor", m_torch_LongTensor_cbitxor}, -{"cmod", m_torch_LongTensor_cmod}, -{"addcmul", m_torch_LongTensor_addcmul}, -{"addcdiv", m_torch_LongTensor_addcdiv}, -{"mv", m_torch_LongTensor_mv}, -{"mm", m_torch_LongTensor_mm}, -{"bmm", m_torch_LongTensor_bmm}, -{"ger", m_torch_LongTensor_ger}, -{"addmv", m_torch_LongTensor_addmv}, -{"addmm", m_torch_LongTensor_addmm}, -{"addr", m_torch_LongTensor_addr}, -{"addbmm", m_torch_LongTensor_addbmm}, -{"baddbmm", m_torch_LongTensor_baddbmm}, -{"numel", m_torch_LongTensor_numel}, -{"cumsum", m_torch_LongTensor_cumsum}, -{"cumprod", m_torch_LongTensor_cumprod}, -{"sum", m_torch_LongTensor_sum}, -{"prod", m_torch_LongTensor_prod}, -{"min", m_torch_LongTensor_min}, -{"max", m_torch_LongTensor_max}, -{"cmin", m_torch_LongTensor_cmin}, -{"cmax", m_torch_LongTensor_cmax}, -{"trace", m_torch_LongTensor_trace}, -{"cross", m_torch_LongTensor_cross}, -{"diag", m_torch_LongTensor_diag}, -{"eye", m_torch_LongTensor_eye}, -{"range", m_torch_LongTensor_range}, -{"randperm", m_torch_LongTensor_randperm}, -{"sort", m_torch_LongTensor_sort}, -{"topk", m_torch_LongTensor_topk}, -{"kthvalue", m_torch_LongTensor_kthvalue}, -{"mode", m_torch_LongTensor_mode}, -{"median", m_torch_LongTensor_median}, -{"tril", m_torch_LongTensor_tril}, -{"triu", m_torch_LongTensor_triu}, -{"cat", m_torch_LongTensor_cat}, -{"random", m_torch_LongTensor_random}, -{"geometric", m_torch_LongTensor_geometric}, -{"bernoulli", m_torch_LongTensor_bernoulli}, -{"squeeze", m_torch_LongTensor_squeeze}, -{"sign", m_torch_LongTensor_sign}, -{"conv2", m_torch_LongTensor_conv2}, -{"xcorr2", m_torch_LongTensor_xcorr2}, -{"conv3", m_torch_LongTensor_conv3}, -{"xcorr3", m_torch_LongTensor_xcorr3}, -{"lt", m_torch_LongTensor_lt}, -{"gt", m_torch_LongTensor_gt}, -{"le", m_torch_LongTensor_le}, -{"ge", m_torch_LongTensor_ge}, -{"eq", m_torch_LongTensor_eq}, -{"ne", m_torch_LongTensor_ne}, -{"nonzero", m_torch_LongTensor_nonzero}, -{"abs", m_torch_LongTensor_abs}, -{NULL, NULL} -}; - -static const struct luaL_Reg torch_LongTensorMath__ [] = { -{"zero", torch_LongTensor_zero}, -{"fill", torch_LongTensor_fill}, -{"zeros", torch_LongTensor_zeros}, -{"ones", torch_LongTensor_ones}, -{"reshape", torch_LongTensor_reshape}, -{"gather", torch_LongTensor_gather}, -{"scatter", torch_LongTensor_scatter}, -{"dot", torch_LongTensor_dot}, -{"equal", torch_LongTensor_equal}, -{"add", torch_LongTensor_add}, -{"csub", torch_LongTensor_csub}, -{"mul", torch_LongTensor_mul}, -{"div", torch_LongTensor_div}, -{"lshift", torch_LongTensor_lshift}, -{"rshift", torch_LongTensor_rshift}, -{"fmod", torch_LongTensor_fmod}, -{"remainder", torch_LongTensor_remainder}, -{"bitand", torch_LongTensor_bitand}, -{"bitor", torch_LongTensor_bitor}, -{"bitxor", torch_LongTensor_bitxor}, -{"mod", torch_LongTensor_mod}, -{"clamp", torch_LongTensor_clamp}, -{"match", torch_LongTensor_match}, -{"cmul", torch_LongTensor_cmul}, -{"cpow", torch_LongTensor_cpow}, -{"cdiv", torch_LongTensor_cdiv}, -{"clshift", torch_LongTensor_clshift}, -{"crshift", torch_LongTensor_crshift}, -{"cfmod", torch_LongTensor_cfmod}, -{"cremainder", torch_LongTensor_cremainder}, -{"cbitand", torch_LongTensor_cbitand}, -{"cbitor", torch_LongTensor_cbitor}, -{"cbitxor", torch_LongTensor_cbitxor}, -{"cmod", torch_LongTensor_cmod}, -{"addcmul", torch_LongTensor_addcmul}, -{"addcdiv", torch_LongTensor_addcdiv}, -{"mv", 
torch_LongTensor_mv}, -{"mm", torch_LongTensor_mm}, -{"bmm", torch_LongTensor_bmm}, -{"ger", torch_LongTensor_ger}, -{"addmv", torch_LongTensor_addmv}, -{"addmm", torch_LongTensor_addmm}, -{"addr", torch_LongTensor_addr}, -{"addbmm", torch_LongTensor_addbmm}, -{"baddbmm", torch_LongTensor_baddbmm}, -{"numel", torch_LongTensor_numel}, -{"cumsum", torch_LongTensor_cumsum}, -{"cumprod", torch_LongTensor_cumprod}, -{"sum", torch_LongTensor_sum}, -{"prod", torch_LongTensor_prod}, -{"min", torch_LongTensor_min}, -{"max", torch_LongTensor_max}, -{"cmin", torch_LongTensor_cmin}, -{"cmax", torch_LongTensor_cmax}, -{"trace", torch_LongTensor_trace}, -{"cross", torch_LongTensor_cross}, -{"diag", torch_LongTensor_diag}, -{"eye", torch_LongTensor_eye}, -{"range", torch_LongTensor_range}, -{"randperm", torch_LongTensor_randperm}, -{"sort", torch_LongTensor_sort}, -{"topk", torch_LongTensor_topk}, -{"kthvalue", torch_LongTensor_kthvalue}, -{"mode", torch_LongTensor_mode}, -{"median", torch_LongTensor_median}, -{"tril", torch_LongTensor_tril}, -{"triu", torch_LongTensor_triu}, -{"cat", torch_LongTensor_cat}, -{"random", torch_LongTensor_random}, -{"geometric", torch_LongTensor_geometric}, -{"bernoulli", torch_LongTensor_bernoulli}, -{"squeeze", torch_LongTensor_squeeze}, -{"sign", torch_LongTensor_sign}, -{"conv2", torch_LongTensor_conv2}, -{"xcorr2", torch_LongTensor_xcorr2}, -{"conv3", torch_LongTensor_conv3}, -{"xcorr3", torch_LongTensor_xcorr3}, -{"lt", torch_LongTensor_lt}, -{"gt", torch_LongTensor_gt}, -{"le", torch_LongTensor_le}, -{"ge", torch_LongTensor_ge}, -{"eq", torch_LongTensor_eq}, -{"ne", torch_LongTensor_ne}, -{"nonzero", torch_LongTensor_nonzero}, -{"abs", torch_LongTensor_abs}, -{NULL, NULL} -}; - -static void torch_LongTensorMath_init(lua_State *L) -{ - luaT_pushmetatable(L, "torch.LongTensor"); - - /* register methods */ - luaT_setfuncs(L, m_torch_LongTensorMath__, 0); - - /* register functions into the "torch" field of the tensor metaclass */ - lua_pushstring(L, "torch"); - lua_newtable(L); - luaT_setfuncs(L, torch_LongTensorMath__, 0); - lua_rawset(L, -3); - lua_pop(L, 1); -} - -static int torch_FloatTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_zero(arg1); -return 1; -} - -static int torch_FloatTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_fill(arg1,arg2); -return 1; -} - -static int torch_FloatTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THFloatTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; 
-str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_FloatTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THFloatTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_FloatTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int torch_FloatTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THFloatTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_FloatTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -float arg8 = 0; -if(narg == 4 
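/*
 * Index translation: Lua dimensions and indices are 1-based, so gather and
 * scatter subtract 1 from the dimension argument before calling into TH
 * ((long)lua_tonumber(...) - 1). The LongTensor nonzero wrapper earlier plays
 * the complementary trick, shifting its result by +1 so Lua sees 1-based
 * coordinates:
 *   local idx = x:nonzero()   -- n x ndim LongTensor of 1-based indices
 */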
-&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* index LongTensor FloatTensor | *FloatTensor* index LongTensor float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THFloatTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_FloatTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -THFloatTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor FloatTensor", type_buf); -} -arg3 = THFloatTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int torch_FloatTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -THFloatTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor FloatTensor", type_buf); -} -arg3 = THFloatTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int torch_FloatTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -THFloatTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 2; -arg4 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 2; -arg6 = (float)lua_tonumber(L, 2); -arg4 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = 
luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float | [*FloatTensor*] FloatTensor [float] FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_FloatTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -THFloatTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 2; -arg4 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 2; -arg6 = (float)lua_tonumber(L, 2); -arg4 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float | [*FloatTensor*] FloatTensor [float] FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_FloatTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
[*FloatTensor*] FloatTensor float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_div(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, 
arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); 
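/*
 * Every scalar operator in this block (mul, div, lshift, rshift, fmod,
 * remainder, bitand, bitor, bitxor, mod) follows the same generated template,
 * [*FloatTensor*] FloatTensor float: an optional destination, the source
 * tensor, then a number. In Lua terms (x and res are hypothetical tensors):
 *   local y = torch.mul(x, 2)   -- allocates and returns a new tensor
 *   torch.mul(res, x, 2)        -- writes into res and returns it
 */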
-THFloatTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg4 = (float)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -arg4 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_FloatTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (float)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor [float]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_FloatTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, 
"torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, 
"torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = 
THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.FloatTensor")) -) -{ -arg1_idx = 
1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [float] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_FloatTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [float] FloatTensor FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_FloatTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THFloatTensor_new(); -THFloatTensor_resize1d(arg1, arg5->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~2D FloatTensor~1D", type_buf); -} -THFloatTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_FloatTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg5->nDimension == 2) 
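
(Editorial sketch, not part of the patch: the generated wrappers above all follow one cwrap dispatch pattern — count the Lua stack arguments, try each legal layout, allocate a fresh FloatTensor when no destination was passed, and otherwise write into and return the leading destination tensor; optional scalars such as addcmul's multiplier default to 1. A minimal Lua illustration of the calling conventions this encodes, assuming a working torch7/LuaJIT install:)

    local x = torch.FloatTensor{1, 2, 3}
    local y = torch.FloatTensor{4, 5, 6}
    local r = torch.cmul(x, y)        -- 2-arg form: wrapper allocates the result
    torch.cmul(r, x, y)               -- 3-arg form: r is reused as the destination
    torch.addcmul(r, x, 0.5, y, y)    -- optional scalar (default 1) scales y .* y
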
-&& (arg6 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THFloatTensor_new(); -THFloatTensor_resize2d(arg1, arg5->size[0], arg6->size[1]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~2D FloatTensor~2D", type_buf); -} -THFloatTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_FloatTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THFloatTensor_new(); -THFloatTensor_resize3d(arg1, arg5->size[0], arg5->size[1], arg6->size[2]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~3D FloatTensor~3D", type_buf); -} -THFloatTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_FloatTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THFloatTensor_new(); -THFloatTensor_resize2d(arg1, arg5->size[0], arg6->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~1D FloatTensor~1D", type_buf); -} -THFloatTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_FloatTensor_addmv(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg3->nDimension 
== 1) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg4 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg4 = (float)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 6, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg4 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [float] FloatTensor~1D [float] FloatTensor~2D FloatTensor~1D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_FloatTensor_addmm(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& 
(arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -arg4 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg4 = (float)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 6, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg4 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [float] FloatTensor~2D [float] FloatTensor~2D FloatTensor~2D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_FloatTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; 
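
(Editorial sketch, not part of the patch: as the bodies above show, mv and mm are thin conveniences over the BLAS-style add* wrappers — they resize a fresh result to the right shape, zero it, and call addmv/addmm with beta = 0 and alpha = 1. In Lua, again assuming a torch7 install:)

    local A = torch.FloatTensor(2, 3):fill(1)
    local v = torch.FloatTensor(3):fill(2)
    local yv = torch.mv(A, v)             -- zeroes yv, then addmv(yv, 0, yv, 1, A, v)
    local B = torch.FloatTensor(3, 2):fill(1)
    local C = torch.mm(A, B)              -- zeroes C, then addmm(C, 0, C, 1, A, B)
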
-THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg4 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg4 = (float)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 6, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg4 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [float] FloatTensor~2D [float] FloatTensor~1D FloatTensor~1D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_FloatTensor_addbmm(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 
0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg4 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg4 = (float)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 6, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg4 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [float] FloatTensor~2D [float] FloatTensor~3D FloatTensor~3D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_FloatTensor_baddbmm(lua_State *L) -{ 
-int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg4 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg4 = (float)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 6, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg4 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [float] FloatTensor~3D [float] FloatTensor~3D FloatTensor~3D", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); 
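
(Editorial sketch, not part of the patch: the batched variants above operate on 3D tensors — bmm zeroes its output and delegates to baddbmm with beta = 0, producing one matrix product per batch slice, while addbmm accumulates all batch products into a single 2D result. Illustrative Lua, under the same torch7 assumption:)

    local a = torch.FloatTensor(4, 2, 3):fill(1)
    local b = torch.FloatTensor(4, 3, 5):fill(1)
    local c = torch.bmm(a, b)             -- 4x2x5: one 2x3 * 3x5 product per batch
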
-return 1; -} - -static int torch_FloatTensor_numel(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -ptrdiff_t arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor", type_buf); -} -arg2 = THFloatTensor_numel(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int torch_FloatTensor_cumsum(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cumsum(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_cumprod(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cumprod(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_sum(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -double arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, 
"invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_sumall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -THFloatTensor_sum(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_FloatTensor_prod(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -double arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_prodall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -THFloatTensor_prod(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_FloatTensor_min(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -float arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] [*LongTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_minall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); 
-THFloatTensor_min(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int torch_FloatTensor_max(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -float arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] [*LongTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_maxall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THFloatTensor_max(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int torch_FloatTensor_cmin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (float)lua_tonumber(L, 2); -arg4 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor | [*FloatTensor*] FloatTensor float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); 
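
(Editorial sketch, not part of the patch: dimension-wise min/max above return a value tensor plus a LongTensor of positions, and the trailing THLongTensor_add(arg4, arg4, 1) shifts the raw 0-based C indices back to Lua's 1-based convention:)

    local t = torch.FloatTensor{{3, 1}, {2, 4}}
    local vals, idx = torch.max(t, 2)     -- per-row values {3, 4}, positions {1, 2}
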
-THFloatTensor_cmin(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_cminValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_FloatTensor_cmax(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (float)lua_tonumber(L, 2); -arg4 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor | [*FloatTensor*] FloatTensor float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cmax(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_cmaxValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_FloatTensor_trace(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -double arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor", type_buf); -} -arg2 = THFloatTensor_trace(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int torch_FloatTensor_cross(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -long arg4 = -1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor 
[index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cross(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_FloatTensor_diag(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_diag(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_eye(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] long [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_eye(arg1,arg2,arg3); -return 1; -} - -static int torch_FloatTensor_range(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 0; -double arg4 = 1; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); 
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] double double [double]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_range(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_FloatTensor_randperm(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -arg3 = (long)lua_tonumber(L, 1); -arg1 = THFloatTensor_new(); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [Generator] long", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_randperm(arg1,arg2,arg3); - -THFloatTensor_add(arg1, arg1, 1); -return 1; -} - -static int torch_FloatTensor_sort(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg2_idx = 1; -arg1 = THFloatTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& 
(arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isboolean(L, 2) -) -{ -arg5 = lua_toboolean(L, 2); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg5 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg5 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = lua_toboolean(L, 4); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg5 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [*LongTensor*] FloatTensor [index] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THFloatTensor_sort(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_FloatTensor_topk(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -long arg4 = 1; -long arg5 = 0; -int arg6 = 0; -int arg7 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 
2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg2_idx = 1; -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& 
lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& 
lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg1 = THFloatTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isboolean(L, 2) -) -{ -arg7 = lua_toboolean(L, 2); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg7 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg7 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg7 = lua_toboolean(L, 4); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg5 = 
THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg7 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THFloatTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; 
-} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THFloatTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 
5); -arg7 = lua_toboolean(L, 6); -} -else if(narg == 5 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg1 = THFloatTensor_new(); -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -&& lua_isboolean(L, 7) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -arg7 = lua_toboolean(L, 7); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [*LongTensor*] FloatTensor [long] [index] [boolean] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THFloatTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_FloatTensor_kthvalue(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, 
"torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [*LongTensor*] FloatTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THFloatTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_FloatTensor_mode(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg2_idx = 1; -arg1 = THFloatTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, 
"torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [*LongTensor*] FloatTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THFloatTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_FloatTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg2_idx = 1; -arg1 = THFloatTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [*LongTensor*] FloatTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THFloatTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_FloatTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} 
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (int)lua_tonumber(L, 2);
-arg1 = THFloatTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (int)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [int]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.FloatTensor");
-THFloatTensor_tril(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_FloatTensor_triu(lua_State *L)
-{
-int narg = lua_gettop(L);
-THFloatTensor *arg1 = NULL;
-int arg1_idx = 0;
-THFloatTensor *arg2 = NULL;
-int arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor"))
-)
-{
-arg1 = THFloatTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (int)lua_tonumber(L, 2);
-arg1 = THFloatTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (int)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [int]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.FloatTensor");
-THFloatTensor_triu(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_FloatTensor_cat(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THFloatTensor *arg1 = NULL;
-int arg1_idx = 0;
-THFloatTensor *arg2 = NULL;
-THFloatTensor *arg3 = NULL;
-long arg4 = -2;
-THFloatTensor *arg5 = NULL;
-int arg5_idx = 0;
-THFloatTensor **arg6_data = NULL;
-long arg6_size = 0;
-int arg6_i = 0;
-long arg7 = -2;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor"))
-)
-{
-argset = 1;
-arg1 = THFloatTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THFloatTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else if(narg == 1
-&& torch_isnonemptytable(L, 1)
-)
-{
-argset = 2;
-do
-{
- arg6_size++;
- lua_checkstack(L, 1);
- lua_rawgeti(L, 1, arg6_size);
-}
-while (!lua_isnil(L, -1));
-arg6_size--;
-lua_pop(L, 1);
-arg6_data = (THFloatTensor**)THAlloc(arg6_size * sizeof(THFloatTensor*));
-for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--)
-{
- if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.FloatTensor")))
- luaL_error(L, "expected FloatTensor in tensor array");
- lua_pop(L, 1);
-}
-
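
tril and triu, just above, are the simplest instances of the pattern: an input tensor, an optional integer diagonal offset (arg3, default 0), and an optional destination tensor signalled by arg1_idx. A sketch under the same stock-torch7 assumption:

    local torch = require 'torch'
    local a = torch.FloatTensor(4, 4):fill(1)
    local lower = torch.tril(a)       -- zero out everything above the main diagonal
    local strict = torch.triu(a, 1)   -- offset +1: keep only the strictly-upper part
    local dst = torch.FloatTensor()
    torch.tril(dst, a, -1)            -- destination-first form, diagonal shifted down
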
-arg5 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THFloatTensor**)THAlloc(arg6_size * sizeof(THFloatTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.FloatTensor"))) - luaL_error(L, "expected FloatTensor in tensor array"); - lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THFloatTensor**)THAlloc(arg6_size * sizeof(THFloatTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.FloatTensor"))) - luaL_error(L, "expected FloatTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THFloatTensor**)THAlloc(arg6_size * sizeof(THFloatTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.FloatTensor"))) - luaL_error(L, "expected FloatTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor [index] | [*FloatTensor*] {FloatTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.FloatTensor"); -THFloatTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static void THFloatTensor_random2__(THFloatTensor *self, THGenerator *gen, long a, long b) -{ - THArgCheck(b >= a, 2, "upper bound must be larger than lower bound"); - TH_TENSOR_APPLY(float, self, *self_data = ((THRandom_random(gen) % (b+1-a)) + a);) -} - -static void THFloatTensor_random1__(THFloatTensor *self, THGenerator *gen, long b) -{ - THArgCheck(b > 0, 1, "upper bound must be strictly positive"); - TH_TENSOR_APPLY(float, self, *self_data = (THRandom_random(gen) % b + 1);) -} - -static int torch_FloatTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THFloatTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THFloatTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; 
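
torch_FloatTensor_cat above carries two independent argument sets: a pairwise form backed by THFloatTensor_cat and a table form backed by THFloatTensor_catArray, which sizes the array by calling lua_rawgeti until it hits nil, insists every entry is a FloatTensor, and releases the temporary pointer array with THFree. An omitted dimension is passed as the -2 sentinel, which TH resolves to the last dimension of the inputs. Illustrative sketch:

    local torch = require 'torch'
    local a = torch.FloatTensor(2, 3):fill(1)
    local b = torch.FloatTensor(2, 3):fill(2)
    local c = torch.cat(a, b, 1)       -- pairwise form: a 4x3 result
    local d = torch.cat({a, b, a}, 2)  -- table form: a 2x9 result
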
-arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *FloatTensor* [Generator] long long | *FloatTensor* [Generator] long | *FloatTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THFloatTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THFloatTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int torch_FloatTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; 
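
torch_FloatTensor_random, completed above, multiplexes six argument sets over the two static helpers defined just before it: THFloatTensor_random2__ fills from [a, b] via THRandom_random(gen) % (b+1-a) + a, and THFloatTensor_random1__ from [1, b]; the scalar forms call THRandom_* directly. Every form optionally takes an explicit torch.Generator first; otherwise the shared torch._gen is fetched with lua_getglobal. Sketch of the Lua-visible overloads:

    local torch = require 'torch'
    local r0 = torch.random()                    -- raw draw from the shared generator
    local r1 = torch.random(6)                   -- uniform integer in [1, 6]
    local r2 = torch.random(10, 20)              -- uniform integer in [10, 20]
    local t = torch.FloatTensor(5):random(1, 6)  -- in-place fill, same bounds rule
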
-THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *FloatTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_FloatTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THFloatTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); 
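
torch_FloatTensor_geometric above, and the bernoulli dispatcher that follows it, reuse the same Generator plumbing for a scalar draw or an in-place tensor fill; bernoulli adds two more argument sets taking a FloatTensor or DoubleTensor of per-element probabilities, and its scalar probability defaults to 0.5. A combined sketch, stock torch7 assumed:

    local torch = require 'torch'
    local g = torch.geometric(0.25)                 -- scalar draw, shared generator
    local t = torch.FloatTensor(8):geometric(0.25)  -- in-place fill
    local b = torch.bernoulli()                     -- 0/1 draw with the default p = 0.5
    t:bernoulli(torch.FloatTensor(8):uniform())     -- per-element probabilities
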
-arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *FloatTensor* [Generator] [double] | *FloatTensor* [Generator] FloatTensor | *FloatTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THFloatTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_FloatTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | [*FloatTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THFloatTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -{int hasdims = arg4->nDimension > 1; -THFloatTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THFloatTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int torch_FloatTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, 
"torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_sign(arg1,arg2); -return 1; -} - -static int torch_FloatTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -float arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -float arg11 = 0; -float arg12 = 1; -THFloatTensor *arg13 = NULL; -THFloatTensor *arg14 = NULL; -float arg15 = 1; -float arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THFloatTensor *arg19 = NULL; -int arg19_idx = 0; -float arg20 = 0; -float arg21 = 1; -THFloatTensor *arg22 = NULL; -THFloatTensor *arg23 = NULL; -float arg24 = 1; -float arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THFloatTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 3) -&& (arg17 = 
lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THFloatTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THFloatTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THFloatTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~2D FloatTensor~2D [(V|F)] | [*FloatTensor*] FloatTensor~3D FloatTensor~3D [(V|F)] | [*FloatTensor*] FloatTensor~3D FloatTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.FloatTensor"); -THFloatTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.FloatTensor"); -THFloatTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_FloatTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -float arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -float arg11 = 0; -float arg12 = 1; -THFloatTensor *arg13 = NULL; -THFloatTensor *arg14 = NULL; -float arg15 = 1; -float arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THFloatTensor *arg19 = NULL; -int arg19_idx = 0; -float arg20 = 0; -float arg21 = 1; -THFloatTensor *arg22 = NULL; 
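
torch_FloatTensor_conv2 above dispatches on three shape combinations, 2D input with 2D kernel (conv2Dmul), 3D with 3D (conv2Dcmul), and 3D input with a 4D kernel bank (conv2Dmv), each accepting an optional border-mode string restricted to 'V' (valid, the default) or 'F' (full); the fixed second flag 'C' requests true convolution. The xcorr2 dispatcher that follows is the same ladder with that flag set to 'X', i.e. cross-correlation without the kernel flip. Sketch:

    local torch = require 'torch'
    local img = torch.FloatTensor(8, 8):uniform()
    local ker = torch.FloatTensor(3, 3):uniform()
    local valid = torch.conv2(img, ker)        -- 'V' mode: a 6x6 result
    local full  = torch.conv2(img, ker, 'F')   -- 'F' mode: a 10x10 result
    local xc    = torch.xcorr2(img, ker)       -- same kernels, no flip
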
-THFloatTensor *arg23 = NULL; -float arg24 = 1; -float arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THFloatTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THFloatTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THFloatTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg23->nDimension == 4) -&& (arg26 = 
lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THFloatTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~2D FloatTensor~2D [(V|F)] | [*FloatTensor*] FloatTensor~3D FloatTensor~3D [(V|F)] | [*FloatTensor*] FloatTensor~3D FloatTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.FloatTensor"); -THFloatTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.FloatTensor"); -THFloatTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_FloatTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -float arg7 = 1; -float arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THFloatTensor *arg11 = NULL; -int arg11_idx = 0; -float arg12 = 0; -float arg13 = 1; -THFloatTensor *arg14 = NULL; -THFloatTensor *arg15 = NULL; -float arg16 = 1; -float arg17 = 1; -float arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THFloatTensor *arg21 = NULL; -int arg21_idx = 0; -float arg22 = 0; -float arg23 = 1; -THFloatTensor *arg24 = NULL; -THFloatTensor *arg25 = NULL; -float arg26 = 1; -float arg27 = 1; -float arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = 
luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THFloatTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THFloatTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THFloatTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THFloatTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~3D FloatTensor~3D [(V|F)] | [*FloatTensor*] FloatTensor~4D FloatTensor~4D [(V|F)] | [*FloatTensor*] FloatTensor~4D FloatTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.FloatTensor"); -THFloatTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} 
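
conv3, whose result handling concludes just below, is the volumetric analogue: 3D input with 3D kernel, 4D with 4D, and 4D with a 5D kernel bank, again with the 'V'/'F' border modes; the xcorr3 dispatcher after it repeats the ladder with the 'X' (no-flip) flag. Sketch under the same assumptions:

    local torch = require 'torch'
    local vol = torch.FloatTensor(6, 6, 6):uniform()
    local ker = torch.FloatTensor(3, 3, 3):uniform()
    local out = torch.conv3(vol, ker)   -- valid mode: a 4x4x4 result
    local xc  = torch.xcorr3(vol, ker)  -- cross-correlation variant
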
-else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.FloatTensor"); -THFloatTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_FloatTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -float arg7 = 1; -float arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THFloatTensor *arg11 = NULL; -int arg11_idx = 0; -float arg12 = 0; -float arg13 = 1; -THFloatTensor *arg14 = NULL; -THFloatTensor *arg15 = NULL; -float arg16 = 1; -float arg17 = 1; -float arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THFloatTensor *arg21 = NULL; -int arg21_idx = 0; -float arg22 = 0; -float arg23 = 1; -THFloatTensor *arg24 = NULL; -THFloatTensor *arg25 = NULL; -float arg26 = 1; -float arg27 = 1; -float arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THFloatTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THFloatTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg14 = 
luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THFloatTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THFloatTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~3D FloatTensor~3D [(V|F)] | [*FloatTensor*] FloatTensor~4D FloatTensor~4D [(V|F)] | [*FloatTensor*] FloatTensor~4D FloatTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.FloatTensor"); -THFloatTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.FloatTensor"); -THFloatTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_FloatTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = 
luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_FloatTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor 
FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_FloatTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_FloatTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int 
arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_FloatTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = 
THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_FloatTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); 
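/*
 * ne follows the same four-way dispatch as lt/gt/le/ge/eq above: compare a
 * FloatTensor against a scalar or a second tensor, writing the result
 * either into a ByteTensor mask (argsets 1 and 3) or, via the
 * ...ValueT/...TensorT variants, as 0/1 floats into a FloatTensor
 * destination (argsets 2 and 4). The lua_pushvalue() just above returns
 * the caller-supplied destination; the call below fills it. A minimal Lua
 * sketch of the call forms, assuming the stock torch7 API (tensor names
 * are illustrative):
 *
 *   local mask = torch.ne(t, 0)   -- argset 1: new ByteTensor mask
 *   torch.ne(mask, t, 0)          -- argset 1: reuse an existing mask
 *   torch.ne(dst, t, 0)           -- argset 2: dst is a FloatTensor
 *   local m2 = torch.ne(a, b)     -- argset 3: tensor vs tensor
 */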
-THFloatTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_FloatTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THFloatTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int torch_FloatTensor_mean(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -double arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_meanall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -THFloatTensor_mean(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_mean(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "mean"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.mean() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_var(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -double arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg3 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] FloatTensor index [boolean]", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_varall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -THFloatTensor_var(arg3,arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_var(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "var"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.var() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_std(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -double arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg3 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] FloatTensor index [boolean]", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_stdall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -THFloatTensor_std(arg3,arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_std(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "std"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.std() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_histc(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -long arg3 = 100; -double arg4 = 0; -double arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (double)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = 
(double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -arg5 = (double)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [long] [double] [double]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_histc(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_histc(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "histc"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.histc() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_bhistc(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -long arg3 = 100; -double arg4 = 0; -double arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg1 = 
THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (double)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -arg5 = (double)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [long] [double] [double]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_bhistc(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_bhistc(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "bhistc"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.bhistc() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_norm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -float arg2 = 2; -double arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -long arg7 = 0; -int arg8 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (float)lua_tonumber(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg6 = (float)lua_tonumber(L, 2); -arg7 = (long)lua_tonumber(L, 3)-1; -arg4 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -arg7 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor [float] | [*FloatTensor*] FloatTensor float index", type_buf); -} -if(argset == 1) -{ -arg3 = THFloatTensor_normall(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_norm(arg4,arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_norm(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "norm"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.norm() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_renorm(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -long arg4 = 0; -float arg5 = 0; -if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg3 = (float)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = (float)lua_tonumber(L, 4); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = (float)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float index float", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_renorm(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_renorm(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "renorm"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.renorm() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_dist(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -THFloatTensor *arg2 = NULL; -float arg3 = 2; -double arg4 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor FloatTensor [float]", type_buf); -} -arg4 = THFloatTensor_dist(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} - -static int torch_dist(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "dist"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.dist() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_linspace(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 0; -long arg4 = 100; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg3 = (float)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg3 = (float)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] float float [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_linspace(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_linspace(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "linspace"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.linspace() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_logspace(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 0; -long arg4 = 100; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg2 = (float)lua_tonumber(L, 1); -arg3 = (float)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg3 = (float)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] float float [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_logspace(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_logspace(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "logspace"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.logspace() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_log(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_log(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = log(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_log(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "log"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.log() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_log1p(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_log1p(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = log1p(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_log1p(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "log1p"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.log1p() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_exp(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_exp(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = exp(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_exp(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "exp"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.exp() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_cos(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cos(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = cos(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_cos(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cos"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cos() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_acos(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_acos(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = acos(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_acos(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "acos"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.acos() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_cosh(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cosh(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = cosh(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_cosh(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cosh"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cosh() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_sin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_sin(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = sin(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_sin(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "sin"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.sin() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_asin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_asin(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = asin(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_asin(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "asin"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.asin() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_sinh(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_sinh(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = sinh(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_sinh(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "sinh"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.sinh() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_tan(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_tan(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = tan(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_tan(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "tan"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.tan() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_atan(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_atan(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = atan(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_atan(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "atan"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.atan() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_tanh(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_tanh(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = tanh(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_tanh(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "tanh"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.tanh() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_sqrt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_sqrt(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = sqrt(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_sqrt(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "sqrt"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.sqrt() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_round(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_round(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = round(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_round(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "round"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.round() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_ceil(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_ceil(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = ceil(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_ceil(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "ceil"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.ceil() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_floor(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_floor(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = floor(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_floor(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "floor"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.floor() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_trunc(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_trunc(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = trunc(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_trunc(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "trunc"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.trunc() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_abs(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_abs(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = fabs(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_FloatTensor_frac(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_frac(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = TH_frac(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_frac(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "frac"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.frac() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_rsqrt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_rsqrt(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = TH_rsqrt(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_rsqrt(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "rsqrt"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.rsqrt() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_sigmoid(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_sigmoid(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = TH_sigmoid(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_sigmoid(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "sigmoid"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.sigmoid() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_neg(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_neg(arg1,arg2); -return 1; -} - -static int torch_neg(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "neg"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.neg() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_cinv(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cinv(arg1,arg2); -return 1; -} - -static int torch_cinv(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? 
*/ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cinv"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cinv() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_lerp(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -float arg4 = 0; -float arg5 = 0; -float arg6 = 0; -float arg7 = 0; -float arg8 = 0; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (float)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 4); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5 = (float)lua_tonumber(L, 1); -arg6 = (float)lua_tonumber(L, 2); -arg7 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor float | float float float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_lerp(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -arg8 = TH_lerp(arg5,arg6,arg7); -lua_pushnumber(L, (lua_Number)arg8); -return 1; -} -return 0; -} - -static int torch_lerp(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "lerp"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.lerp() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_atan2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -float arg4 = 0; -float arg5 = 0; -float arg6 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4 = (float)lua_tonumber(L, 1); -arg5 = (float)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor | float float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_atan2(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -arg6 = atan2(arg4,arg5); -lua_pushnumber(L, (lua_Number)arg6); -return 1; -} -return 0; -} - -static int torch_atan2(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "atan2"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.atan2() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_pow(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -float arg5 = 0; -THFloatTensor *arg6 = NULL; -float arg7 = 0; -float arg8 = 0; -float arg9 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& (arg6 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 2; -arg5 = (float)lua_tonumber(L, 1); -arg4 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = (float)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 3; -arg7 = (float)lua_tonumber(L, 1); -arg8 = (float)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor float | [*FloatTensor*] float FloatTensor | float float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_pow(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_tpow(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -arg9 = pow(arg7,arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -return 0; -} - -static int torch_pow(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "pow"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.pow() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_rand(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg3 = torch_checklongargs(L, 1); -arg1 = THFloatTensor_new(); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, torch_Generator)) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [Generator] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_rand(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int torch_rand(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "rand"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.rand() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_randn(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg3 = torch_checklongargs(L, 1); -arg1 = THFloatTensor_new(); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, torch_Generator)) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [Generator] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_randn(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int torch_randn(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "randn"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.randn() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_multinomial(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -THFloatTensor *arg3 = NULL; -int arg4 = 0; -int arg5 = 0; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (int)lua_tonumber(L, 2); -arg1 = THLongTensor_new(); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, torch_Generator)) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (int)lua_tonumber(L, 3); -arg1 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 4); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (int)lua_tonumber(L, 2); -arg5 = lua_toboolean(L, 3); -arg1 = THLongTensor_new(); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 3); -arg5 = lua_toboolean(L, 4); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, torch_Generator)) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (int)lua_tonumber(L, 3); -arg5 = lua_toboolean(L, 4); -arg1 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 4); -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [Generator] FloatTensor int [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THFloatTensor_multinomial(arg1,arg2,arg3,arg4,arg5); 
-THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int torch_multinomial(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "multinomial"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.multinomial() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_uniform(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 1; -double arg4 = 0; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -float arg7 = 0; -float arg8 = 1; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 
-&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -arg8 = (float)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -arg8 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *FloatTensor* [Generator] [float] [float]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_uniform(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THFloatTensor_uniform(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_uniform(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "uniform"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.uniform() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_normal(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 1; -double arg4 = 0; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -float arg7 = 0; -float arg8 = 1; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ 
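Every branch above that lets the caller omit the explicit torch.Generator inlines the same three-call fetch of the global default generator stored in torch._gen. Factored into a helper (the function name is mine, the call sequence is verbatim from the branches above):

static THGenerator *get_default_generator(lua_State *L)
{
  THGenerator *gen;
  lua_getglobal(L, "torch");  /* push the torch table */
  gen = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
  lua_pop(L, 2);              /* pop torch._gen and the torch table */
  return gen;
}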
-argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -arg8 = (float)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -arg8 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *FloatTensor* [Generator] [float] [float]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_normal(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THFloatTensor_normal(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_normal(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "normal"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.normal() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_cauchy(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 1; -double arg4 = 0; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -float arg7 = 0; -float arg8 = 1; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; 
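uniform, normal, cauchy and logNormal all end in the same two-way tail: argset 1 is the scalar form, which draws a single number with the THRandom_* routine and returns it, while argset 2 is the tensor form, which fills the starred destination in place with the THFloatTensor_* routine and returns that tensor. Condensed into a hypothetical helper for the normal case:

static int normal_tail(lua_State *L, int argset, int dst_idx,
                       THFloatTensor *dst, THGenerator *gen,
                       double mean, double stdv)
{
  if(argset == 1)
  {
    /* scalar form: one draw, returned as a Lua number */
    lua_pushnumber(L, (lua_Number)THRandom_normal(gen, mean, stdv));
    return 1;
  }
  if(argset == 2)
  {
    /* tensor form: fill the caller-supplied tensor in place, return it */
    lua_pushvalue(L, dst_idx);
    THFloatTensor_normal(dst, gen, (float)mean, (float)stdv);
    return 1;
  }
  return 0;
}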
-arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -arg8 = (float)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -arg8 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *FloatTensor* [Generator] [float] [float]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_cauchy(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THFloatTensor_cauchy(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_cauchy(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "cauchy"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.cauchy() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_logNormal(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 1; -double arg3 = 2; -double arg4 = 0; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -float arg7 = 1; -float arg8 = 2; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ 
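Each torch.*() entry point in this file (randn, multinomial, uniform, normal, cauchy, and the linear-algebra functions below) is the same dispatcher with a different method name: find a tensor-type table from the arguments (a tensor in position one or two, a table of tensors, or a trailing type-name string, falling back to the default tensor type), look the method up in that table, and forward every argument unchanged. A hand-factored equivalent, with a hypothetical dispatch_by_name() helper and the stack discipline copied from the generated code:

static int dispatch_by_name(lua_State *L, const char *name)
{
  int narg = lua_gettop(L);
  const void *tname;

  if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1))))
  { /* first argument is a tensor */ }
  else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2))))
  { /* second argument is a tensor */ }
  else if(narg >= 1 && (tname = torch_istensorarray(L, 1)))
  { /* table of tensors */ }
  else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING
          && (tname = torch_istensortype(L, lua_tostring(L, narg))))
  { lua_remove(L, -2); }  /* trailing tensor-type name string */
  else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L))))
    luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor");

  lua_pushstring(L, name);
  lua_rawget(L, -2);  /* fetch the method from the type table */
  if(!lua_isfunction(L, -1))
    return luaL_error(L, "%s does not implement the torch.%s() function", tname, name);

  lua_insert(L, 1);
  lua_pop(L, 2);  /* the two tables left on the stack above */
  lua_call(L, lua_gettop(L)-1, LUA_MULTRET);
  return lua_gettop(L);
}

so that, for example, torch_cauchy() would reduce to return dispatch_by_name(L, "cauchy");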
-argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -arg8 = (float)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -arg8 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *FloatTensor* [Generator] [float] [float]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_logNormal(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THFloatTensor_logNormal(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_logNormal(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "logNormal"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.logNormal() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_exponential(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -float arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *FloatTensor* [Generator] float", type_buf); -} -if(argset 
== 1) -{ -arg3 = THRandom_exponential(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_exponential(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_exponential(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "exponential"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.exponential() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_gesv(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THFloatTensor *arg6 = NULL; -int arg6_idx = 0; -THFloatTensor *arg7 = NULL; -THFloatTensor *arg8 = NULL; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 2; -arg5 = THFloatTensor_new(); -arg6 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* *FloatTensor* FloatTensor FloatTensor | FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THFloatTensor_gesv(arg1,arg2,arg3,arg4); -return 2; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.FloatTensor"); -if(arg6_idx) -lua_pushvalue(L, arg6_idx); -else -luaT_pushudata(L, arg6, "torch.FloatTensor"); -THFloatTensor_gesv(arg5,arg6,arg7,arg8); -return 2; -} -return 0; -} - -static int torch_gesv(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "gesv"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.gesv() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_gels(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THFloatTensor *arg6 = NULL; -int arg6_idx = 0; -THFloatTensor *arg7 = NULL; -THFloatTensor *arg8 = NULL; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 2; -arg5 = THFloatTensor_new(); -arg6 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* *FloatTensor* FloatTensor FloatTensor | FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THFloatTensor_gels(arg1,arg2,arg3,arg4); -return 2; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.FloatTensor"); -if(arg6_idx) -lua_pushvalue(L, arg6_idx); -else -luaT_pushudata(L, arg6, "torch.FloatTensor"); -THFloatTensor_gels(arg5,arg6,arg7,arg8); -return 2; -} -return 0; -} - -static int torch_gels(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "gels"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.gels() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_trtrs(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -const char *arg5 = NULL; -char arg5_default = 'U'; -const char *arg6 = NULL; -char arg6_default = 'N'; -const char *arg7 = NULL; -char arg7_default = 'N'; -THFloatTensor *arg8 = NULL; -int arg8_idx = 0; -THFloatTensor *arg9 = NULL; -int arg9_idx = 0; -THFloatTensor *arg10 = NULL; -THFloatTensor *arg11 = NULL; -const char *arg12 = NULL; -char arg12_default = 'U'; -const char *arg13 = NULL; -char arg13_default = 'N'; -const char *arg14 = NULL; -char arg14_default = 'N'; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = &arg5_default; -arg6 = &arg6_default; -arg7 = &arg7_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'U' || *arg5 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg6 = &arg6_default; -arg7 = &arg7_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg6 = lua_tostring(L, 5)) && (*arg6 == 'N' || *arg6 == 'T') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = &arg5_default; -arg7 = &arg7_default; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'U' || *arg5 == 'L') -&& (arg6 = lua_tostring(L, 6)) && (*arg6 == 'N' || *arg6 == 'T') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg7 = &arg7_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg7 = lua_tostring(L, 5)) && (*arg7 == 'N' || *arg7 == 'U') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = &arg5_default; -arg6 = &arg6_default; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'U' || *arg5 == 'L') -&& (arg7 = 
lua_tostring(L, 6)) && (*arg7 == 'N' || *arg7 == 'U') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg6 = &arg6_default; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg6 = lua_tostring(L, 5)) && (*arg6 == 'N' || *arg6 == 'T') -&& (arg7 = lua_tostring(L, 6)) && (*arg7 == 'N' || *arg7 == 'U') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = &arg5_default; -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'U' || *arg5 == 'L') -&& (arg6 = lua_tostring(L, 6)) && (*arg6 == 'N' || *arg6 == 'T') -&& (arg7 = lua_tostring(L, 7)) && (*arg7 == 'N' || *arg7 == 'U') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 2; -arg8 = THFloatTensor_new(); -arg9 = THFloatTensor_new(); -arg12 = &arg12_default; -arg13 = &arg13_default; -arg14 = &arg14_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = lua_tostring(L, 3)) && (*arg12 == 'U' || *arg12 == 'L') -) -{ -argset = 2; -arg8 = THFloatTensor_new(); -arg9 = THFloatTensor_new(); -arg13 = &arg13_default; -arg14 = &arg14_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg13 = lua_tostring(L, 3)) && (*arg13 == 'N' || *arg13 == 'T') -) -{ -argset = 2; -arg8 = THFloatTensor_new(); -arg9 = THFloatTensor_new(); -arg12 = &arg12_default; -arg14 = &arg14_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = lua_tostring(L, 3)) && (*arg12 == 'U' || *arg12 == 'L') -&& (arg13 = lua_tostring(L, 4)) && (*arg13 == 'N' || *arg13 == 'T') -) -{ -argset = 2; -arg8 = THFloatTensor_new(); -arg9 = THFloatTensor_new(); -arg14 = &arg14_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg14 = lua_tostring(L, 3)) && (*arg14 == 'N' || *arg14 == 'U') -) -{ -argset = 2; -arg8 = THFloatTensor_new(); -arg9 = THFloatTensor_new(); -arg12 = &arg12_default; -arg13 = &arg13_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = lua_tostring(L, 3)) && (*arg12 == 'U' || *arg12 == 'L') -&& (arg14 = lua_tostring(L, 4)) && (*arg14 == 'N' || *arg14 == 'U') -) -{ -argset = 2; -arg8 = THFloatTensor_new(); -arg9 = THFloatTensor_new(); -arg13 = &arg13_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg13 = lua_tostring(L, 3)) && (*arg13 == 'N' || *arg13 == 'T') -&& (arg14 = lua_tostring(L, 4)) && (*arg14 == 'N' || *arg14 == 'U') -) -{ -argset = 2; -arg8 = THFloatTensor_new(); -arg9 = THFloatTensor_new(); -arg12 = &arg12_default; -} -else if(narg == 5 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& 
(arg12 = lua_tostring(L, 3)) && (*arg12 == 'U' || *arg12 == 'L') -&& (arg13 = lua_tostring(L, 4)) && (*arg13 == 'N' || *arg13 == 'T') -&& (arg14 = lua_tostring(L, 5)) && (*arg14 == 'N' || *arg14 == 'U') -) -{ -argset = 2; -arg8 = THFloatTensor_new(); -arg9 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* *FloatTensor* FloatTensor FloatTensor [(U|L)] [(N|T)] [(N|U)] | FloatTensor FloatTensor [(U|L)] [(N|T)] [(N|U)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THFloatTensor_trtrs(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -return 2; -} -else if(argset == 2) -{ -if(arg8_idx) -lua_pushvalue(L, arg8_idx); -else -luaT_pushudata(L, arg8, "torch.FloatTensor"); -if(arg9_idx) -lua_pushvalue(L, arg9_idx); -else -luaT_pushudata(L, arg9, "torch.FloatTensor"); -THFloatTensor_trtrs(arg8,arg9,arg10,arg11,arg12,arg13,arg14); -return 2; -} -return 0; -} - -static int torch_trtrs(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "trtrs"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.trtrs() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_symeig(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -const char *arg4 = NULL; -char arg4_default = 'N'; -const char *arg5 = NULL; -char arg5_default = 'U'; -THFloatTensor *arg6 = NULL; -int arg6_idx = 0; -THFloatTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -const char *arg9 = NULL; -char arg9_default = 'N'; -const char *arg10 = NULL; -char arg10_default = 'U'; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg4 = &arg4_default; -arg5 = &arg5_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'N' || *arg4 == 'V') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = &arg5_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg5 = lua_tostring(L, 4)) && (*arg5 == 'U' || *arg5 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; 
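trtrs above shows what this matching scheme costs for optional flags: three independent one-character options, (U|L), (N|T) and (N|U), are validated by peeking at the first byte of the string argument, and every subset of supplied flags gets its own else-if branch, eight per argset, with defaults supplied as pointers to local chars (arg5_default = 'U' and so on). Note too that in the allocating (unstarred) forms the result tensors are created with THFloatTensor_new() and pushed with luaT_pushudata() before the TH routine runs, so Lua already owns them when they are filled in, and that several of the surrounding wrappers rename the LAPACK-style TH routine they call: symeig uses THFloatTensor_syev, eig uses geev, svd uses gesvd, inverse uses getri. A hypothetical helper equivalent to the flag tests:

static const char *check_flag(lua_State *L, int idx, char a, char b)
{
  /* mirrors the (argN = lua_tostring(L, idx)) && (*argN == 'U' || *argN == 'L')
     tests above; NULL means no match, letting the caller fall through to
     the next overload exactly like the generated chain does */
  const char *s = lua_tostring(L, idx);
  return (s && (s[0] == a || s[0] == b)) ? s : NULL;
}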
-arg4 = &arg4_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'N' || *arg4 == 'V') -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'U' || *arg5 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg6 = THFloatTensor_new(); -arg7 = THFloatTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = lua_tostring(L, 2)) && (*arg9 == 'N' || *arg9 == 'V') -) -{ -argset = 2; -arg6 = THFloatTensor_new(); -arg7 = THFloatTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg10 = lua_tostring(L, 2)) && (*arg10 == 'U' || *arg10 == 'L') -) -{ -argset = 2; -arg6 = THFloatTensor_new(); -arg7 = THFloatTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = lua_tostring(L, 2)) && (*arg9 == 'N' || *arg9 == 'V') -&& (arg10 = lua_tostring(L, 3)) && (*arg10 == 'U' || *arg10 == 'L') -) -{ -argset = 2; -arg6 = THFloatTensor_new(); -arg7 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* *FloatTensor* FloatTensor [(N|V)] [(U|L)] | FloatTensor [(N|V)] [(U|L)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THFloatTensor_syev(arg1,arg2,arg3,arg4,arg5); -return 2; -} -else if(argset == 2) -{ -if(arg6_idx) -lua_pushvalue(L, arg6_idx); -else -luaT_pushudata(L, arg6, "torch.FloatTensor"); -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.FloatTensor"); -THFloatTensor_syev(arg6,arg7,arg8,arg9,arg10); -return 2; -} -return 0; -} - -static int torch_symeig(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "symeig"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.symeig() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_eig(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -const char *arg4 = NULL; -char arg4_default = 'N'; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THFloatTensor *arg6 = NULL; -int arg6_idx = 0; -THFloatTensor *arg7 = NULL; -const char *arg8 = NULL; -char arg8_default = 'N'; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg4 = &arg4_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'N' || *arg4 == 'V') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 1 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg5 = THFloatTensor_new(); -arg6 = THFloatTensor_new(); -arg8 = &arg8_default; -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg8 = lua_tostring(L, 2)) && (*arg8 == 'N' || *arg8 == 'V') -) -{ -argset = 2; -arg5 = THFloatTensor_new(); -arg6 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* *FloatTensor* FloatTensor [(N|V)] | FloatTensor [(N|V)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THFloatTensor_geev(arg1,arg2,arg3,arg4); -return 2; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.FloatTensor"); -if(arg6_idx) -lua_pushvalue(L, arg6_idx); -else -luaT_pushudata(L, arg6, "torch.FloatTensor"); -THFloatTensor_geev(arg5,arg6,arg7,arg8); -return 2; -} -return 0; -} - -static int torch_eig(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "eig"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.eig() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_svd(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -const char *arg5 = NULL; -char arg5_default = 'S'; -THFloatTensor *arg6 = NULL; -int arg6_idx = 0; -THFloatTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -int arg8_idx = 0; -THFloatTensor *arg9 = NULL; -const char *arg10 = NULL; -char arg10_default = 'S'; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg3_idx = 3; -arg5 = &arg5_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'A' || *arg5 == 'S') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg3_idx = 3; -} -else if(narg == 1 -&& (arg9 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg6 = THFloatTensor_new(); -arg7 = THFloatTensor_new(); -arg8 = THFloatTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg9 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg10 = lua_tostring(L, 2)) && (*arg10 == 'A' || *arg10 == 'S') -) -{ -argset = 2; -arg6 = THFloatTensor_new(); -arg7 = THFloatTensor_new(); -arg8 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* *FloatTensor* *FloatTensor* FloatTensor [(A|S)] | FloatTensor [(A|S)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -lua_pushvalue(L, arg3_idx); -THFloatTensor_gesvd(arg1,arg2,arg3,arg4,arg5); -return 3; -} -else if(argset == 2) -{ -if(arg6_idx) -lua_pushvalue(L, arg6_idx); -else -luaT_pushudata(L, arg6, "torch.FloatTensor"); -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.FloatTensor"); -if(arg8_idx) -lua_pushvalue(L, arg8_idx); -else -luaT_pushudata(L, arg8, "torch.FloatTensor"); -THFloatTensor_gesvd(arg6,arg7,arg8,arg9,arg10); -return 3; -} -return 0; -} - -static int torch_svd(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? 
*/ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "svd"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.svd() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_inverse(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg3 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* FloatTensor | FloatTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_getri(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -THFloatTensor_getri(arg3,arg4); -return 1; -} -return 0; -} - -static int torch_inverse(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "inverse"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.inverse() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_potrf(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -const char *arg3 = NULL; -char arg3_default = 'U'; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -const char *arg6 = NULL; -char arg6_default = 'U'; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = &arg3_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = lua_tostring(L, 3)) && (*arg3 == 'U' || *arg3 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg4 = THFloatTensor_new(); -arg6 = &arg6_default; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = lua_tostring(L, 2)) && (*arg6 == 'U' || *arg6 == 'L') -) -{ -argset = 2; -arg4 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* FloatTensor [(U|L)] | FloatTensor [(U|L)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_potrf(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_potrf(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_potrf(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "potrf"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.potrf() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_potrs(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -const char *arg4 = NULL; -char arg4_default = 'U'; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THFloatTensor *arg6 = NULL; -THFloatTensor *arg7 = NULL; -const char *arg8 = NULL; -char arg8_default = 'U'; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = &arg4_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'U' || *arg4 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg6 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 2; -arg5 = THFloatTensor_new(); -arg8 = &arg8_default; -} -else if(narg == 3 -&& (arg6 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'U' || *arg8 == 'L') -) -{ -argset = 2; -arg5 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* FloatTensor FloatTensor [(U|L)] | FloatTensor FloatTensor [(U|L)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_potrs(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.FloatTensor"); -THFloatTensor_potrs(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_potrs(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "potrs"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.potrs() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_potri(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -const char *arg3 = NULL; -char arg3_default = 'U'; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -const char *arg6 = NULL; -char arg6_default = 'U'; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = &arg3_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = lua_tostring(L, 3)) && (*arg3 == 'U' || *arg3 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg4 = THFloatTensor_new(); -arg6 = &arg6_default; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = lua_tostring(L, 2)) && (*arg6 == 'U' || *arg6 == 'L') -) -{ -argset = 2; -arg4 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* FloatTensor [(U|L)] | FloatTensor [(U|L)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_potri(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_potri(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_potri(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "potri"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.potri() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_pstrf(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -const char *arg4 = NULL; -char arg4_default = 'U'; -float arg5 = -1; -THFloatTensor *arg6 = NULL; -int arg6_idx = 0; -THIntTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -const char *arg9 = NULL; -char arg9_default = 'U'; -float arg10 = -1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg4 = &arg4_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'U' || *arg4 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = (float)lua_tonumber(L, 4); -arg4 = &arg4_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'U' || *arg4 == 'L') -&& lua_isnumber(L, 5) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = (float)lua_tonumber(L, 5); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg6 = THFloatTensor_new(); -arg7 = THIntTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = lua_tostring(L, 2)) && (*arg9 == 'U' || *arg9 == 'L') -) -{ -argset = 2; -arg6 = THFloatTensor_new(); -arg7 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg10 = (float)lua_tonumber(L, 2); -arg6 = THFloatTensor_new(); -arg7 = THIntTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = lua_tostring(L, 2)) && (*arg9 == 'U' || *arg9 == 'L') -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg10 = (float)lua_tonumber(L, 3); -arg6 = THFloatTensor_new(); -arg7 = THIntTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* *IntTensor* FloatTensor [(U|L)] [float] | FloatTensor [(U|L)] [float]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THFloatTensor_pstrf(arg1,arg2,arg3,arg4,arg5); -return 2; -} -else if(argset == 2) -{ -if(arg6_idx) -lua_pushvalue(L, arg6_idx); 
-else -luaT_pushudata(L, arg6, "torch.FloatTensor"); -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.IntTensor"); -THFloatTensor_pstrf(arg6,arg7,arg8,arg9,arg10); -return 2; -} -return 0; -} - -static int torch_pstrf(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "pstrf"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.pstrf() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_qr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 1 -&& (arg6 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg4 = THFloatTensor_new(); -arg5 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* *FloatTensor* FloatTensor | FloatTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THFloatTensor_qr(arg1,arg2,arg3); -return 2; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.FloatTensor"); -THFloatTensor_qr(arg4,arg5,arg6); -return 2; -} -return 0; -} - -static int torch_qr(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "qr"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.qr() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_geqrf(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 1 -&& (arg6 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg4 = THFloatTensor_new(); -arg5 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* *FloatTensor* FloatTensor | FloatTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THFloatTensor_geqrf(arg1,arg2,arg3); -return 2; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.FloatTensor"); -THFloatTensor_geqrf(arg4,arg5,arg6); -return 2; -} -return 0; -} - -static int torch_geqrf(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "geqrf"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.geqrf() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_orgqr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 2; -arg4 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* FloatTensor FloatTensor | FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_orgqr(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_orgqr(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_orgqr(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "orgqr"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.orgqr() function", tname); - - return lua_gettop(L); -} - -static int torch_FloatTensor_ormqr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -const char *arg5 = NULL; -char arg5_default = 'L'; -const char *arg6 = NULL; -char arg6_default = 'N'; -THFloatTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -const char *arg11 = NULL; -char arg11_default = 'L'; -const char *arg12 = NULL; -char arg12_default = 'N'; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg5 = &arg5_default; -arg6 = &arg6_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'L' || *arg5 == 'R') -) -{ -argset = 1; -arg1_idx = 1; -arg6 = &arg6_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg6 = lua_tostring(L, 5)) && (*arg6 == 'N' || *arg6 == 'T') -) -{ -argset = 1; -arg1_idx = 1; -arg5 = &arg5_default; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'L' || *arg5 == 'R') -&& (arg6 = lua_tostring(L, 6)) && (*arg6 == 'N' || *arg6 == 'T') -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg10 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 2; -arg7 = THFloatTensor_new(); -arg11 = &arg11_default; -arg12 = &arg12_default; -} -else if(narg == 4 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg10 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg11 = lua_tostring(L, 4)) && (*arg11 == 'L' || *arg11 == 'R') -) -{ -argset = 2; -arg7 = THFloatTensor_new(); -arg12 = &arg12_default; -} -else if(narg == 4 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg10 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg12 = lua_tostring(L, 4)) && (*arg12 == 'N' || *arg12 == 'T') -) -{ -argset = 2; -arg7 = THFloatTensor_new(); -arg11 = &arg11_default; -} -else if(narg == 5 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& 
(arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg10 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg11 = lua_tostring(L, 4)) && (*arg11 == 'L' || *arg11 == 'R') -&& (arg12 = lua_tostring(L, 5)) && (*arg12 == 'N' || *arg12 == 'T') -) -{ -argset = 2; -arg7 = THFloatTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* FloatTensor FloatTensor FloatTensor [(L|R)] [(N|T)] | FloatTensor FloatTensor FloatTensor [(L|R)] [(N|T)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_ormqr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.FloatTensor"); -THFloatTensor_ormqr(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_ormqr(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? */ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "ormqr"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.ormqr() function", tname); - - return lua_gettop(L); -} - -static int m_torch_FloatTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_zero(arg1); -return 1; -} - -static int m_torch_FloatTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_fill(arg1,arg2); -return 1; -} - -static int m_torch_FloatTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - 
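Every wrapper in this file follows the same cwrap-generated shape: enumerate the legal (argument count, argument types) signatures in order, take the first one that matches the Lua stack, fill in defaults for anything omitted, and only then call into TH. Below is a minimal standalone sketch of that dispatch idea, not torch code: the names arg_kind, stack_t and dispatch_add are hypothetical, and the real wrappers test userdata metatables via luaT_toudata rather than an enum.

#include <stdio.h>

typedef enum { ARG_TENSOR, ARG_NUMBER, ARG_STRING } arg_kind;

/* hypothetical stand-in for the Lua argument stack */
typedef struct { int n; arg_kind kind[8]; } stack_t;

/* Returns the matched argset (1 or 2) or 0 if no overload fits,
   mirroring the narg/argset chains in the generated wrappers. */
static int dispatch_add(const stack_t *st, double *alpha)
{
    *alpha = 1.0;                               /* default scale, like arg6 = 1 */
    if (st->n == 2 && st->kind[0] == ARG_TENSOR && st->kind[1] == ARG_NUMBER)
        return 1;                               /* t:add(number)                */
    if (st->n == 3 && st->kind[0] == ARG_TENSOR &&
        st->kind[1] == ARG_NUMBER && st->kind[2] == ARG_TENSOR)
        return 2;                               /* t:add(alpha, tensor)         */
    return 0;                                   /* falls through to luaL_error  */
}

int main(void)
{
    stack_t call = { 3, { ARG_TENSOR, ARG_NUMBER, ARG_TENSOR } };
    double alpha;
    printf("matched argset %d\n", dispatch_add(&call, &alpha));
    return 0;
}

Matching on the exact argument count before any types keeps the defaulting unambiguous, which is why every generated chain tests narg first.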
-static int m_torch_FloatTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_FloatTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int m_torch_FloatTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THFloatTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_FloatTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -float arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = 
(float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* index LongTensor FloatTensor | *FloatTensor* index LongTensor float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THFloatTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -THFloatTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor FloatTensor", type_buf); -} -arg3 = THFloatTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int m_torch_FloatTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -THFloatTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor FloatTensor", type_buf); -} -arg3 = THFloatTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int m_torch_FloatTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -THFloatTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float | *FloatTensor* [FloatTensor] [float] FloatTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); 
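/* The tensor-operand form of add lowers to cadd: arg4 = arg5 + arg6*arg7,
   where the scale arg6 defaults to 1 when no number was passed. */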
-THFloatTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -THFloatTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float | *FloatTensor* [FloatTensor] [float] FloatTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, 
"invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_div(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; 
-float arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg4 = (float)lua_tonumber(L, 3); -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = 
luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -arg4 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_FloatTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* FloatTensor FloatTensor [float]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_FloatTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ 
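/* Common failure path: str_arg_types renders the actual Lua stack types
   into type_buf so the luaL_error message can contrast them with the
   accepted signatures. */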
-char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int 
m_torch_FloatTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = 
luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] [float] FloatTensor FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_FloatTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] [float] FloatTensor FloatTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_FloatTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* FloatTensor~2D FloatTensor~1D", type_buf); -} 
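/* mv has no TH kernel of its own: the result is zeroed explicitly and
   addmv is then called with beta = 0 (arg2) and alpha = 1 (arg4), i.e.
   result = 0*result + 1*(mat*vec). mm, bmm and ger below follow the
   same zero-then-accumulate pattern over addmm, baddbmm and addr. */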
-THFloatTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THFloatTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_FloatTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* FloatTensor~2D FloatTensor~2D", type_buf); -} -THFloatTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THFloatTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_FloatTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* FloatTensor~3D FloatTensor~3D", type_buf); -} -THFloatTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THFloatTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_FloatTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* FloatTensor~1D FloatTensor~1D", type_buf); -} -THFloatTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THFloatTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_FloatTensor_addmv(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -THFloatTensor *arg7 = NULL; -int arg7_idx = 0; -float arg8 = 0; -THFloatTensor *arg9 = NULL; -float arg10 = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, 
"torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -arg10 = (float)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg9->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.FloatTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -arg10 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor~1D* [FloatTensor~1D] [float] FloatTensor~2D FloatTensor~1D | *FloatTensor~1D* float [FloatTensor~1D] float FloatTensor~2D FloatTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THFloatTensor_addmv(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_addmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -THFloatTensor *arg7 = NULL; -int arg7_idx = 0; -float arg8 = 0; -THFloatTensor *arg9 = NULL; -float arg10 = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx 
= 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -arg10 = (float)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.FloatTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -arg10 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor~2D* [FloatTensor~2D] [float] FloatTensor~2D FloatTensor~2D | *FloatTensor~2D* float [FloatTensor~2D] float FloatTensor~2D FloatTensor~2D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THFloatTensor_addmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -THFloatTensor *arg7 = NULL; -int arg7_idx = 0; -float arg8 = 0; -THFloatTensor *arg9 = NULL; -float arg10 = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 
3, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -arg10 = (float)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 6, "torch.FloatTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -arg10 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor~2D* [FloatTensor~2D] [float] FloatTensor~1D FloatTensor~1D | *FloatTensor~2D* float [FloatTensor~2D] float FloatTensor~1D FloatTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THFloatTensor_addr(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_addbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -THFloatTensor *arg7 = NULL; -int arg7_idx = 0; -float arg8 = 0; -THFloatTensor *arg9 = NULL; -float arg10 = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; 
-arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -arg10 = (float)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.FloatTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -arg10 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor~2D* [FloatTensor~2D] [float] FloatTensor~3D FloatTensor~3D | *FloatTensor~2D* float [FloatTensor~2D] float FloatTensor~3D FloatTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THFloatTensor_addbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_baddbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 1; -THFloatTensor *arg3 = NULL; -float arg4 = 1; -THFloatTensor *arg5 = NULL; -THFloatTensor *arg6 = NULL; -THFloatTensor *arg7 = NULL; -int arg7_idx = 0; -float arg8 = 0; -THFloatTensor *arg9 = NULL; -float arg10 = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg1->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) && 
(arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.FloatTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -arg10 = (float)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg9->nDimension == 3) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.FloatTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.FloatTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -arg10 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor~3D* [FloatTensor~3D] [float] FloatTensor~3D FloatTensor~3D | *FloatTensor~3D* float [FloatTensor~3D] float FloatTensor~3D FloatTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THFloatTensor_baddbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_numel(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -ptrdiff_t arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor", type_buf); -} -arg2 = THFloatTensor_numel(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_FloatTensor_cumsum(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cumsum(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_cumprod(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; 
-THFloatTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cumprod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_sum(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -double arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_sumall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -THFloatTensor_sum(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_prod(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -double arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_prodall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -THFloatTensor_prod(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_min(lua_State *L) -{ -int narg = lua_gettop(L); -int 
argset = 0; -THFloatTensor *arg1 = NULL; -float arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] [*LongTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_minall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THFloatTensor_min(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_FloatTensor_max(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -float arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] [*LongTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = 
THFloatTensor_maxall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THFloatTensor_max(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_FloatTensor_cmin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (float)lua_tonumber(L, 1); -arg4 = THFloatTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (float)lua_tonumber(L, 2); -arg4 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [FloatTensor] FloatTensor | [*FloatTensor*] [FloatTensor] float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cmin(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_cminValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_cmax(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, 
"torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (float)lua_tonumber(L, 1); -arg4 = THFloatTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (float)lua_tonumber(L, 2); -arg4 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [FloatTensor] FloatTensor | [*FloatTensor*] [FloatTensor] float", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cmax(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_cmaxValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_trace(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -double arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor", type_buf); -} -arg2 = THFloatTensor_trace(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_FloatTensor_cross(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -long arg4 = -1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cross(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_FloatTensor_diag(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ 
-arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_diag(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_eye(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_eye(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_range(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 0; -double arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* double double [double]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_range(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_FloatTensor_randperm(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [Generator] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_randperm(arg1,arg2,arg3); - -THFloatTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_FloatTensor_sort(lua_State *L) -{ 
-int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg2_idx = 1; -arg1 = THFloatTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isboolean(L, 2) -) -{ -arg5 = lua_toboolean(L, 2); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg5 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg5 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = lua_toboolean(L, 4); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg5 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = 
(long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [*LongTensor*] FloatTensor [index] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THFloatTensor_sort(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_FloatTensor_topk(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -long arg4 = 1; -long arg5 = 0; -int arg6 = 0; -int arg7 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg2_idx = 1; -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = 
(long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 
4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg1 = THFloatTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isboolean(L, 2) -) -{ -arg7 = lua_toboolean(L, 2); -arg1 = 
THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg7 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg7 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg7 = lua_toboolean(L, 4); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg7 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& 
lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THFloatTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = 
lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THFloatTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -} -else if(narg == 5 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg1 = THFloatTensor_new(); -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -&& lua_isboolean(L, 7) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -arg7 = lua_toboolean(L, 7); -} -else -{ -char type_buf[512]; 
-str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [*LongTensor*] FloatTensor [long] [index] [boolean] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THFloatTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_FloatTensor_kthvalue(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [*LongTensor*] FloatTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THFloatTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_FloatTensor_mode(lua_State *L) 
-{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg2_idx = 1; -arg1 = THFloatTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [*LongTensor*] FloatTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THFloatTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_FloatTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THFloatTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg2_idx = 1; -arg1 = THFloatTensor_new(); -arg4 = THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = 
THFloatTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THFloatTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] [*LongTensor*] FloatTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THFloatTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_FloatTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_tril(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_triu(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [int]", type_buf); -} 
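/*
 * In the generated usage strings, *FloatTensor* marks a destination argument
 * and [x] an optional one, so "[*FloatTensor*] FloatTensor [int]" reads:
 * optional destination, mandatory source, optional diagonal offset. The
 * epilogue that follows is shared by every binding with an optional
 * destination: if the caller supplied one, its stack slot is re-pushed and
 * the operation writes into it; otherwise the freshly allocated tensor is
 * handed to Lua's garbage collector via luaT_pushudata. Sketched as a
 * standalone helper (the name push_dest is hypothetical):
 *
 *   static int push_dest(lua_State *L, THFloatTensor *dst, int dst_idx)
 *   {
 *     if (dst_idx)
 *       lua_pushvalue(L, dst_idx);   <- caller-supplied destination, reused
 *     else
 *       luaT_pushudata(L, dst, "torch.FloatTensor");   <- new, GC-owned
 *     return 1;
 *   }
 */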
-if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_triu(arg1,arg2,arg3); -return 1; -} - -static int m_torch_FloatTensor_cat(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -long arg4 = -2; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THFloatTensor **arg6_data = NULL; -long arg6_size = 0; -int arg6_i = 0; -long arg7 = -2; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 1 -&& torch_isnonemptytable(L, 1) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THFloatTensor**)THAlloc(arg6_size * sizeof(THFloatTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.FloatTensor"))) - luaL_error(L, "expected FloatTensor in tensor array"); - lua_pop(L, 1); -} - -arg5 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THFloatTensor**)THAlloc(arg6_size * sizeof(THFloatTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.FloatTensor"))) - luaL_error(L, "expected FloatTensor in tensor array"); - lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THFloatTensor**)THAlloc(arg6_size * sizeof(THFloatTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.FloatTensor"))) - luaL_error(L, "expected FloatTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THFloatTensor**)THAlloc(arg6_size * sizeof(THFloatTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, 
"torch.FloatTensor"))) - luaL_error(L, "expected FloatTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor FloatTensor [index] | [*FloatTensor*] {FloatTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.FloatTensor"); -THFloatTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THFloatTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THFloatTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, 
"torch.FloatTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *FloatTensor* [Generator] long long | *FloatTensor* [Generator] long | *FloatTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THFloatTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THFloatTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *FloatTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THFloatTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset 
= 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *FloatTensor* [Generator] [double] | *FloatTensor* [Generator] FloatTensor | *FloatTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THFloatTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = 
luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor | [*FloatTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THFloatTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -{int hasdims = arg4->nDimension > 1; -THFloatTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THFloatTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_sign(arg1,arg2); -return 1; -} - -static int m_torch_FloatTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -float arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -float arg11 = 0; -float arg12 = 1; -THFloatTensor *arg13 = NULL; -THFloatTensor *arg14 = NULL; -float arg15 = 1; -float arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THFloatTensor *arg19 = NULL; -int arg19_idx = 0; -float arg20 = 0; -float arg21 = 1; -THFloatTensor *arg22 = NULL; -THFloatTensor *arg23 = NULL; -float arg24 = 1; -float arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) 
&& (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THFloatTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THFloatTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THFloatTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THFloatTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~2D FloatTensor~2D [(V|F)] | [*FloatTensor*] FloatTensor~3D FloatTensor~3D [(V|F)] | [*FloatTensor*] FloatTensor~3D FloatTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, 
"torch.FloatTensor"); -THFloatTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.FloatTensor"); -THFloatTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.FloatTensor"); -THFloatTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -float arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -float arg11 = 0; -float arg12 = 1; -THFloatTensor *arg13 = NULL; -THFloatTensor *arg14 = NULL; -float arg15 = 1; -float arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THFloatTensor *arg19 = NULL; -int arg19_idx = 0; -float arg20 = 0; -float arg21 = 1; -THFloatTensor *arg22 = NULL; -THFloatTensor *arg23 = NULL; -float arg24 = 1; -float arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THFloatTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && 
(arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THFloatTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THFloatTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THFloatTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~2D FloatTensor~2D [(V|F)] | [*FloatTensor*] FloatTensor~3D FloatTensor~3D [(V|F)] | [*FloatTensor*] FloatTensor~3D FloatTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.FloatTensor"); -THFloatTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.FloatTensor"); -THFloatTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -float arg7 = 1; -float arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THFloatTensor *arg11 = NULL; -int arg11_idx = 0; -float arg12 = 0; -float arg13 = 1; -THFloatTensor *arg14 = NULL; -THFloatTensor *arg15 = NULL; -float arg16 = 1; -float arg17 = 1; -float arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THFloatTensor *arg21 = NULL; -int 
arg21_idx = 0; -float arg22 = 0; -float arg23 = 1; -THFloatTensor *arg24 = NULL; -THFloatTensor *arg25 = NULL; -float arg26 = 1; -float arg27 = 1; -float arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THFloatTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THFloatTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THFloatTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.FloatTensor")) && 
(arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THFloatTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~3D FloatTensor~3D [(V|F)] | [*FloatTensor*] FloatTensor~4D FloatTensor~4D [(V|F)] | [*FloatTensor*] FloatTensor~4D FloatTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.FloatTensor"); -THFloatTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.FloatTensor"); -THFloatTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 1; -THFloatTensor *arg4 = NULL; -THFloatTensor *arg5 = NULL; -float arg6 = 1; -float arg7 = 1; -float arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THFloatTensor *arg11 = NULL; -int arg11_idx = 0; -float arg12 = 0; -float arg13 = 1; -THFloatTensor *arg14 = NULL; -THFloatTensor *arg15 = NULL; -float arg16 = 1; -float arg17 = 1; -float arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THFloatTensor *arg21 = NULL; -int arg21_idx = 0; -float arg22 = 0; -float arg23 = 1; -THFloatTensor *arg24 = NULL; -THFloatTensor *arg25 = NULL; -float arg26 = 1; -float arg27 = 1; -float arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THFloatTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 
= luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THFloatTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THFloatTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THFloatTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THFloatTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.FloatTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.FloatTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor~3D FloatTensor~3D [(V|F)] | [*FloatTensor*] FloatTensor~4D FloatTensor~4D [(V|F)] | [*FloatTensor*] FloatTensor~4D FloatTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, 
arg11, "torch.FloatTensor"); -THFloatTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.FloatTensor"); -THFloatTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ 
-argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, 
"torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); 
-THFloatTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THFloatTensor *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THFloatTensor *arg10 = NULL; -int arg10_idx = 0; -THFloatTensor *arg11 = NULL; -THFloatTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (float)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); 
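/*
 * m_torch_FloatTensor_nonzero below shows the index round trip from the
 * other side: TH emits 0-based coordinates, so the binding adds 1 to the
 * result before returning it, and a caller-supplied LongTensor is first
 * shifted down by 1, presumably so that feeding an earlier (already
 * 1-based) result back in stays consistent. The symmetric shifts are the
 * whole contract:
 *
 *   THLongTensor_add(idx, idx, -1);   <- incoming contents back to 0-based
 *   THFloatTensor_nonzero(idx, t);    <- TH fills 0-based coordinates
 *   THLongTensor_add(idx, idx, 1);    <- outgoing contents to 1-based
 */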
-} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] FloatTensor float | *FloatTensor* FloatTensor float | [*ByteTensor*] FloatTensor FloatTensor | *FloatTensor* FloatTensor FloatTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THFloatTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THFloatTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THFloatTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] FloatTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THFloatTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_FloatTensor_mean(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -double arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] FloatTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_meanall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 
2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -THFloatTensor_mean(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_var(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -double arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg3 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] FloatTensor index [boolean]", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_varall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -THFloatTensor_var(arg3,arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_std(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -double arg2 = 0; -THFloatTensor *arg3 = NULL; -int arg3_idx = 0; -THFloatTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg3 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor | [*FloatTensor*] FloatTensor index [boolean]", type_buf); -} -if(argset == 1) -{ -arg2 = THFloatTensor_stdall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ 
-if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.FloatTensor"); -THFloatTensor_std(arg3,arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_histc(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -long arg3 = 100; -double arg4 = 0; -double arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (double)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& 
(arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -arg5 = (double)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [long] [double] [double]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_histc(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_FloatTensor_bhistc(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -long arg3 = 100; -double arg4 = 0; -double arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1 = THFloatTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (double)lua_tonumber(L, 2); -arg1 = THFloatTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = 
luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -arg1 = THFloatTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -arg5 = (double)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*FloatTensor*] FloatTensor [long] [double] [double]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.FloatTensor"); -THFloatTensor_bhistc(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_FloatTensor_norm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -float arg2 = 2; -double arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THFloatTensor *arg5 = NULL; -float arg6 = 0; -long arg7 = 0; -int arg8 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (float)lua_tonumber(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg6 = (float)lua_tonumber(L, 2); -arg7 = (long)lua_tonumber(L, 3)-1; -arg4 = THFloatTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -arg7 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor [float] | [*FloatTensor*] FloatTensor float index", type_buf); -} -if(argset == 1) -{ -arg3 = THFloatTensor_normall(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.FloatTensor"); -THFloatTensor_norm(arg4,arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_renorm(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -long arg4 = 0; -float arg5 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = (float)lua_tonumber(L, 4); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = (float)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; 
-str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float index float", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_renorm(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_FloatTensor_dist(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -THFloatTensor *arg2 = NULL; -float arg3 = 2; -double arg4 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -arg3 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: FloatTensor FloatTensor [float]", type_buf); -} -arg4 = THFloatTensor_dist(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} - -static int m_torch_FloatTensor_linspace(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 0; -long arg4 = 100; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg3 = (float)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* float float [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_linspace(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_FloatTensor_logspace(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -float arg2 = 0; -float arg3 = 0; -long arg4 = 100; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (float)lua_tonumber(L, 2); -arg3 = (float)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* float float [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_logspace(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_FloatTensor_log(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: 
%s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_log(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = log(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_log1p(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_log1p(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = log1p(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_exp(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_exp(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = exp(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_cos(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_cos(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = cos(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_acos(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = 
luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_acos(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = acos(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_cosh(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_cosh(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = cosh(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_sin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_sin(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = sin(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_asin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_asin(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = asin(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int 
m_torch_FloatTensor_sinh(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_sinh(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = sinh(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_tan(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_tan(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = tan(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_atan(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_atan(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = atan(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_tanh(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); 
-luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_tanh(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = tanh(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_sqrt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_sqrt(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = sqrt(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_round(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_round(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = round(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_ceil(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_ceil(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = ceil(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_floor(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = 
arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_floor(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = floor(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_trunc(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_trunc(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = trunc(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_abs(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_abs(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = fabs(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_frac(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_frac(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = TH_frac(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; 
-} -return 0; -} - -static int m_torch_FloatTensor_rsqrt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_rsqrt(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = TH_rsqrt(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_sigmoid(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -float arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (float)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] | float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_sigmoid(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = TH_sigmoid(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_neg(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_neg(arg1,arg2); -return 1; -} - -static int m_torch_FloatTensor_cinv(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_cinv(arg1,arg2); -return 1; -} - -static int m_torch_FloatTensor_lerp(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -float arg4 = 0; -float arg5 = 0; -float arg6 = 0; -float 
arg7 = 0; -float arg8 = 0; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 3); -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (float)lua_tonumber(L, 4); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5 = (float)lua_tonumber(L, 1); -arg6 = (float)lua_tonumber(L, 2); -arg7 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor float | float float float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_lerp(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -arg8 = TH_lerp(arg5,arg6,arg7); -lua_pushnumber(L, (lua_Number)arg8); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_atan2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -THFloatTensor *arg3 = NULL; -float arg4 = 0; -float arg5 = 0; -float arg6 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4 = (float)lua_tonumber(L, 1); -arg5 = (float)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] FloatTensor | float float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_atan2(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -arg6 = atan2(arg4,arg5); -lua_pushnumber(L, (lua_Number)arg6); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_pow(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THFloatTensor *arg2 = NULL; -float arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -float arg5 = 0; -THFloatTensor *arg6 = NULL; -float arg7 = 0; -float arg8 = 0; -float arg9 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = (float)lua_tonumber(L, 2); -arg6 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& (arg6 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = (float)lua_tonumber(L, 2); -} -else if(narg == 2 -&& 
lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 3; -arg7 = (float)lua_tonumber(L, 1); -arg8 = (float)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [FloatTensor] float | *FloatTensor* float [FloatTensor] | float float", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THFloatTensor_pow(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_tpow(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -arg9 = pow(arg7,arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_rand(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [Generator] (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_rand(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int m_torch_FloatTensor_randn(lua_State *L) -{ -int narg = lua_gettop(L); -THFloatTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *FloatTensor* [Generator] (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_randn(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int m_torch_FloatTensor_multinomial(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -THFloatTensor *arg3 = NULL; -int arg4 = 0; -int arg5 = 0; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.FloatTensor")) -&& lua_isnumber(L, 3) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 4); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, 
"torch.FloatTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 3); -arg5 = lua_toboolean(L, 4); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& (arg3 = luaT_toudata(L, 3, "torch.FloatTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 4); -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [Generator] FloatTensor int [boolean]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THFloatTensor_multinomial(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_FloatTensor_uniform(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 1; -double arg4 = 0; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -float arg7 = 0; -float arg8 = 1; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -} -else 
if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -arg8 = (float)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -arg8 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *FloatTensor* [Generator] [float] [float]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_uniform(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THFloatTensor_uniform(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_normal(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 1; -double arg4 = 0; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -float arg7 = 0; -float arg8 = 1; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& 
(arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -arg8 = (float)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -arg8 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *FloatTensor* [Generator] [float] [float]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_normal(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THFloatTensor_normal(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_cauchy(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 1; -double arg4 = 0; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -float arg7 = 0; -float arg8 = 1; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = 
luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -arg8 = (float)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -arg8 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *FloatTensor* [Generator] [float] [float]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_cauchy(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THFloatTensor_cauchy(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_logNormal(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 1; -double arg3 = 2; -double arg4 = 0; -THFloatTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -float arg7 = 1; -float arg8 = 2; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = 
(double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (float)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 2); -arg8 = (float)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (float)lua_tonumber(L, 3); -arg8 = (float)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *FloatTensor* [Generator] [float] [float]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_logNormal(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THFloatTensor_logNormal(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_FloatTensor_exponential(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THFloatTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -float arg6 = 0; 
-if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.FloatTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (float)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *FloatTensor* [Generator] float", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_exponential(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THFloatTensor_exponential(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static const struct luaL_Reg m_torch_FloatTensorMath__ [] = { -{"zero", m_torch_FloatTensor_zero}, -{"fill", m_torch_FloatTensor_fill}, -{"zeros", m_torch_FloatTensor_zeros}, -{"ones", m_torch_FloatTensor_ones}, -{"reshape", m_torch_FloatTensor_reshape}, -{"gather", m_torch_FloatTensor_gather}, -{"scatter", m_torch_FloatTensor_scatter}, -{"dot", m_torch_FloatTensor_dot}, -{"equal", m_torch_FloatTensor_equal}, -{"add", m_torch_FloatTensor_add}, -{"csub", m_torch_FloatTensor_csub}, -{"mul", m_torch_FloatTensor_mul}, -{"div", m_torch_FloatTensor_div}, -{"lshift", m_torch_FloatTensor_lshift}, -{"rshift", m_torch_FloatTensor_rshift}, -{"fmod", m_torch_FloatTensor_fmod}, -{"remainder", m_torch_FloatTensor_remainder}, -{"bitand", m_torch_FloatTensor_bitand}, -{"bitor", m_torch_FloatTensor_bitor}, -{"bitxor", m_torch_FloatTensor_bitxor}, -{"mod", m_torch_FloatTensor_mod}, -{"clamp", m_torch_FloatTensor_clamp}, -{"match", m_torch_FloatTensor_match}, -{"cmul", m_torch_FloatTensor_cmul}, -{"cpow", m_torch_FloatTensor_cpow}, -{"cdiv", m_torch_FloatTensor_cdiv}, -{"clshift", m_torch_FloatTensor_clshift}, -{"crshift", m_torch_FloatTensor_crshift}, -{"cfmod", m_torch_FloatTensor_cfmod}, -{"cremainder", m_torch_FloatTensor_cremainder}, -{"cbitand", m_torch_FloatTensor_cbitand}, -{"cbitor", m_torch_FloatTensor_cbitor}, -{"cbitxor", m_torch_FloatTensor_cbitxor}, -{"cmod", m_torch_FloatTensor_cmod}, -{"addcmul", m_torch_FloatTensor_addcmul}, -{"addcdiv", m_torch_FloatTensor_addcdiv}, -{"mv", m_torch_FloatTensor_mv}, -{"mm", m_torch_FloatTensor_mm}, -{"bmm", m_torch_FloatTensor_bmm}, -{"ger", m_torch_FloatTensor_ger}, -{"addmv", m_torch_FloatTensor_addmv}, -{"addmm", m_torch_FloatTensor_addmm}, -{"addr", m_torch_FloatTensor_addr}, -{"addbmm", m_torch_FloatTensor_addbmm}, -{"baddbmm", m_torch_FloatTensor_baddbmm}, -{"numel", m_torch_FloatTensor_numel}, -{"cumsum", m_torch_FloatTensor_cumsum}, -{"cumprod", m_torch_FloatTensor_cumprod}, -{"sum", m_torch_FloatTensor_sum}, -{"prod", m_torch_FloatTensor_prod}, -{"min", m_torch_FloatTensor_min}, -{"max", m_torch_FloatTensor_max}, -{"cmin", m_torch_FloatTensor_cmin}, -{"cmax", m_torch_FloatTensor_cmax}, -{"trace", m_torch_FloatTensor_trace}, -{"cross", m_torch_FloatTensor_cross}, -{"diag", 
m_torch_FloatTensor_diag}, -{"eye", m_torch_FloatTensor_eye}, -{"range", m_torch_FloatTensor_range}, -{"randperm", m_torch_FloatTensor_randperm}, -{"sort", m_torch_FloatTensor_sort}, -{"topk", m_torch_FloatTensor_topk}, -{"kthvalue", m_torch_FloatTensor_kthvalue}, -{"mode", m_torch_FloatTensor_mode}, -{"median", m_torch_FloatTensor_median}, -{"tril", m_torch_FloatTensor_tril}, -{"triu", m_torch_FloatTensor_triu}, -{"cat", m_torch_FloatTensor_cat}, -{"random", m_torch_FloatTensor_random}, -{"geometric", m_torch_FloatTensor_geometric}, -{"bernoulli", m_torch_FloatTensor_bernoulli}, -{"squeeze", m_torch_FloatTensor_squeeze}, -{"sign", m_torch_FloatTensor_sign}, -{"conv2", m_torch_FloatTensor_conv2}, -{"xcorr2", m_torch_FloatTensor_xcorr2}, -{"conv3", m_torch_FloatTensor_conv3}, -{"xcorr3", m_torch_FloatTensor_xcorr3}, -{"lt", m_torch_FloatTensor_lt}, -{"gt", m_torch_FloatTensor_gt}, -{"le", m_torch_FloatTensor_le}, -{"ge", m_torch_FloatTensor_ge}, -{"eq", m_torch_FloatTensor_eq}, -{"ne", m_torch_FloatTensor_ne}, -{"nonzero", m_torch_FloatTensor_nonzero}, -{"mean", m_torch_FloatTensor_mean}, -{"var", m_torch_FloatTensor_var}, -{"std", m_torch_FloatTensor_std}, -{"histc", m_torch_FloatTensor_histc}, -{"bhistc", m_torch_FloatTensor_bhistc}, -{"norm", m_torch_FloatTensor_norm}, -{"renorm", m_torch_FloatTensor_renorm}, -{"dist", m_torch_FloatTensor_dist}, -{"linspace", m_torch_FloatTensor_linspace}, -{"logspace", m_torch_FloatTensor_logspace}, -{"log", m_torch_FloatTensor_log}, -{"log1p", m_torch_FloatTensor_log1p}, -{"exp", m_torch_FloatTensor_exp}, -{"cos", m_torch_FloatTensor_cos}, -{"acos", m_torch_FloatTensor_acos}, -{"cosh", m_torch_FloatTensor_cosh}, -{"sin", m_torch_FloatTensor_sin}, -{"asin", m_torch_FloatTensor_asin}, -{"sinh", m_torch_FloatTensor_sinh}, -{"tan", m_torch_FloatTensor_tan}, -{"atan", m_torch_FloatTensor_atan}, -{"tanh", m_torch_FloatTensor_tanh}, -{"sqrt", m_torch_FloatTensor_sqrt}, -{"round", m_torch_FloatTensor_round}, -{"ceil", m_torch_FloatTensor_ceil}, -{"floor", m_torch_FloatTensor_floor}, -{"trunc", m_torch_FloatTensor_trunc}, -{"abs", m_torch_FloatTensor_abs}, -{"frac", m_torch_FloatTensor_frac}, -{"rsqrt", m_torch_FloatTensor_rsqrt}, -{"sigmoid", m_torch_FloatTensor_sigmoid}, -{"neg", m_torch_FloatTensor_neg}, -{"cinv", m_torch_FloatTensor_cinv}, -{"lerp", m_torch_FloatTensor_lerp}, -{"atan2", m_torch_FloatTensor_atan2}, -{"pow", m_torch_FloatTensor_pow}, -{"rand", m_torch_FloatTensor_rand}, -{"randn", m_torch_FloatTensor_randn}, -{"multinomial", m_torch_FloatTensor_multinomial}, -{"uniform", m_torch_FloatTensor_uniform}, -{"normal", m_torch_FloatTensor_normal}, -{"cauchy", m_torch_FloatTensor_cauchy}, -{"logNormal", m_torch_FloatTensor_logNormal}, -{"exponential", m_torch_FloatTensor_exponential}, -{NULL, NULL} -}; - -static const struct luaL_Reg torch_FloatTensorMath__ [] = { -{"zero", torch_FloatTensor_zero}, -{"fill", torch_FloatTensor_fill}, -{"zeros", torch_FloatTensor_zeros}, -{"ones", torch_FloatTensor_ones}, -{"reshape", torch_FloatTensor_reshape}, -{"gather", torch_FloatTensor_gather}, -{"scatter", torch_FloatTensor_scatter}, -{"dot", torch_FloatTensor_dot}, -{"equal", torch_FloatTensor_equal}, -{"add", torch_FloatTensor_add}, -{"csub", torch_FloatTensor_csub}, -{"mul", torch_FloatTensor_mul}, -{"div", torch_FloatTensor_div}, -{"lshift", torch_FloatTensor_lshift}, -{"rshift", torch_FloatTensor_rshift}, -{"fmod", torch_FloatTensor_fmod}, -{"remainder", torch_FloatTensor_remainder}, -{"bitand", torch_FloatTensor_bitand}, -{"bitor", 
torch_FloatTensor_bitor}, -{"bitxor", torch_FloatTensor_bitxor}, -{"mod", torch_FloatTensor_mod}, -{"clamp", torch_FloatTensor_clamp}, -{"match", torch_FloatTensor_match}, -{"cmul", torch_FloatTensor_cmul}, -{"cpow", torch_FloatTensor_cpow}, -{"cdiv", torch_FloatTensor_cdiv}, -{"clshift", torch_FloatTensor_clshift}, -{"crshift", torch_FloatTensor_crshift}, -{"cfmod", torch_FloatTensor_cfmod}, -{"cremainder", torch_FloatTensor_cremainder}, -{"cbitand", torch_FloatTensor_cbitand}, -{"cbitor", torch_FloatTensor_cbitor}, -{"cbitxor", torch_FloatTensor_cbitxor}, -{"cmod", torch_FloatTensor_cmod}, -{"addcmul", torch_FloatTensor_addcmul}, -{"addcdiv", torch_FloatTensor_addcdiv}, -{"mv", torch_FloatTensor_mv}, -{"mm", torch_FloatTensor_mm}, -{"bmm", torch_FloatTensor_bmm}, -{"ger", torch_FloatTensor_ger}, -{"addmv", torch_FloatTensor_addmv}, -{"addmm", torch_FloatTensor_addmm}, -{"addr", torch_FloatTensor_addr}, -{"addbmm", torch_FloatTensor_addbmm}, -{"baddbmm", torch_FloatTensor_baddbmm}, -{"numel", torch_FloatTensor_numel}, -{"cumsum", torch_FloatTensor_cumsum}, -{"cumprod", torch_FloatTensor_cumprod}, -{"sum", torch_FloatTensor_sum}, -{"prod", torch_FloatTensor_prod}, -{"min", torch_FloatTensor_min}, -{"max", torch_FloatTensor_max}, -{"cmin", torch_FloatTensor_cmin}, -{"cmax", torch_FloatTensor_cmax}, -{"trace", torch_FloatTensor_trace}, -{"cross", torch_FloatTensor_cross}, -{"diag", torch_FloatTensor_diag}, -{"eye", torch_FloatTensor_eye}, -{"range", torch_FloatTensor_range}, -{"randperm", torch_FloatTensor_randperm}, -{"sort", torch_FloatTensor_sort}, -{"topk", torch_FloatTensor_topk}, -{"kthvalue", torch_FloatTensor_kthvalue}, -{"mode", torch_FloatTensor_mode}, -{"median", torch_FloatTensor_median}, -{"tril", torch_FloatTensor_tril}, -{"triu", torch_FloatTensor_triu}, -{"cat", torch_FloatTensor_cat}, -{"random", torch_FloatTensor_random}, -{"geometric", torch_FloatTensor_geometric}, -{"bernoulli", torch_FloatTensor_bernoulli}, -{"squeeze", torch_FloatTensor_squeeze}, -{"sign", torch_FloatTensor_sign}, -{"conv2", torch_FloatTensor_conv2}, -{"xcorr2", torch_FloatTensor_xcorr2}, -{"conv3", torch_FloatTensor_conv3}, -{"xcorr3", torch_FloatTensor_xcorr3}, -{"lt", torch_FloatTensor_lt}, -{"gt", torch_FloatTensor_gt}, -{"le", torch_FloatTensor_le}, -{"ge", torch_FloatTensor_ge}, -{"eq", torch_FloatTensor_eq}, -{"ne", torch_FloatTensor_ne}, -{"nonzero", torch_FloatTensor_nonzero}, -{"mean", torch_FloatTensor_mean}, -{"var", torch_FloatTensor_var}, -{"std", torch_FloatTensor_std}, -{"histc", torch_FloatTensor_histc}, -{"bhistc", torch_FloatTensor_bhistc}, -{"norm", torch_FloatTensor_norm}, -{"renorm", torch_FloatTensor_renorm}, -{"dist", torch_FloatTensor_dist}, -{"linspace", torch_FloatTensor_linspace}, -{"logspace", torch_FloatTensor_logspace}, -{"log", torch_FloatTensor_log}, -{"log1p", torch_FloatTensor_log1p}, -{"exp", torch_FloatTensor_exp}, -{"cos", torch_FloatTensor_cos}, -{"acos", torch_FloatTensor_acos}, -{"cosh", torch_FloatTensor_cosh}, -{"sin", torch_FloatTensor_sin}, -{"asin", torch_FloatTensor_asin}, -{"sinh", torch_FloatTensor_sinh}, -{"tan", torch_FloatTensor_tan}, -{"atan", torch_FloatTensor_atan}, -{"tanh", torch_FloatTensor_tanh}, -{"sqrt", torch_FloatTensor_sqrt}, -{"round", torch_FloatTensor_round}, -{"ceil", torch_FloatTensor_ceil}, -{"floor", torch_FloatTensor_floor}, -{"trunc", torch_FloatTensor_trunc}, -{"abs", torch_FloatTensor_abs}, -{"frac", torch_FloatTensor_frac}, -{"rsqrt", torch_FloatTensor_rsqrt}, -{"sigmoid", torch_FloatTensor_sigmoid}, -{"neg", 
torch_FloatTensor_neg}, -{"cinv", torch_FloatTensor_cinv}, -{"lerp", torch_FloatTensor_lerp}, -{"atan2", torch_FloatTensor_atan2}, -{"pow", torch_FloatTensor_pow}, -{"rand", torch_FloatTensor_rand}, -{"randn", torch_FloatTensor_randn}, -{"multinomial", torch_FloatTensor_multinomial}, -{"uniform", torch_FloatTensor_uniform}, -{"normal", torch_FloatTensor_normal}, -{"cauchy", torch_FloatTensor_cauchy}, -{"logNormal", torch_FloatTensor_logNormal}, -{"exponential", torch_FloatTensor_exponential}, -{"gesv", torch_FloatTensor_gesv}, -{"gels", torch_FloatTensor_gels}, -{"trtrs", torch_FloatTensor_trtrs}, -{"symeig", torch_FloatTensor_symeig}, -{"eig", torch_FloatTensor_eig}, -{"svd", torch_FloatTensor_svd}, -{"inverse", torch_FloatTensor_inverse}, -{"potrf", torch_FloatTensor_potrf}, -{"potrs", torch_FloatTensor_potrs}, -{"potri", torch_FloatTensor_potri}, -{"pstrf", torch_FloatTensor_pstrf}, -{"qr", torch_FloatTensor_qr}, -{"geqrf", torch_FloatTensor_geqrf}, -{"orgqr", torch_FloatTensor_orgqr}, -{"ormqr", torch_FloatTensor_ormqr}, -{NULL, NULL} -}; - -static void torch_FloatTensorMath_init(lua_State *L) -{ - luaT_pushmetatable(L, "torch.FloatTensor"); - - /* register methods */ - luaT_setfuncs(L, m_torch_FloatTensorMath__, 0); - - /* register functions into the "torch" field of the tensor metaclass */ - lua_pushstring(L, "torch"); - lua_newtable(L); - luaT_setfuncs(L, torch_FloatTensorMath__, 0); - lua_rawset(L, -3); - lua_pop(L, 1); -} - -static const struct luaL_Reg m_torch_HalfTensorMath__ [] = { -{NULL, NULL} -}; - -static const struct luaL_Reg torch_HalfTensorMath__ [] = { -{NULL, NULL} -}; - -static void torch_HalfTensorMath_init(lua_State *L) -{ - luaT_pushmetatable(L, "torch.HalfTensor"); - - /* register methods */ - luaT_setfuncs(L, m_torch_HalfTensorMath__, 0); - - /* register functions into the "torch" field of the tensor metaclass */ - lua_pushstring(L, "torch"); - lua_newtable(L); - luaT_setfuncs(L, torch_HalfTensorMath__, 0); - lua_rawset(L, -3); - lua_pop(L, 1); -} - -static int torch_DoubleTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_zero(arg1); -return 1; -} - -static int torch_DoubleTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_fill(arg1,arg2); -return 1; -} - -static int torch_DoubleTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THDoubleTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
[*DoubleTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_DoubleTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 1 -&& torch_islongargs(L, 1) -) -{ -arg2 = torch_checklongargs(L, 1); -arg1 = THDoubleTensor_new(); -} -else if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int torch_DoubleTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int torch_DoubleTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THDoubleTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THDoubleTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_DoubleTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -double arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) 
-&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* index LongTensor DoubleTensor | *DoubleTensor* index LongTensor double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THDoubleTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor DoubleTensor", type_buf); -} -arg3 = THDoubleTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int torch_DoubleTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -THDoubleTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor DoubleTensor", type_buf); -} -arg3 = THDoubleTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int torch_DoubleTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -THDoubleTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 2; -arg6 = (double)lua_tonumber(L, 2); -arg4 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = 
luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double | [*DoubleTensor*] DoubleTensor [double] DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.DoubleTensor"); -THDoubleTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -THDoubleTensor *arg7 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 2; -arg6 = (double)lua_tonumber(L, 2); -arg4 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double | [*DoubleTensor*] DoubleTensor [double] DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.DoubleTensor"); -THDoubleTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); 
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_div(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid 
arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: 
%s\nexpected arguments: [*DoubleTensor*] DoubleTensor double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_DoubleTensor_match(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor [double]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - 
-static int torch_DoubleTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_clshift(arg1,arg2,arg3); -return 1; 
-} - -static int torch_DoubleTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); 
-THDoubleTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = 
luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [double] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_DoubleTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [double] DoubleTensor DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_DoubleTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THDoubleTensor_new(); -THDoubleTensor_resize1d(arg1, arg5->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor~2D DoubleTensor~1D", type_buf); -} -THDoubleTensor_zero(arg1); 
-if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_DoubleTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg6->nDimension == 2) -) -{ -arg1 = THDoubleTensor_new(); -THDoubleTensor_resize2d(arg1, arg5->size[0], arg6->size[1]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor~2D DoubleTensor~2D", type_buf); -} -THDoubleTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_DoubleTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg6->nDimension == 3) -) -{ -arg1 = THDoubleTensor_new(); -THDoubleTensor_resize3d(arg1, arg5->size[0], arg5->size[1], arg6->size[2]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor~3D DoubleTensor~3D", type_buf); -} -THDoubleTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_DoubleTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 1; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THDoubleTensor_new(); -THDoubleTensor_resize2d(arg1, arg5->size[0], arg6->size[0]); -arg3 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] 
DoubleTensor~1D DoubleTensor~1D", type_buf); -} -THDoubleTensor_zero(arg1); -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int torch_DoubleTensor_addmv(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 1; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (double)lua_tonumber(L, 1); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 5 -&& lua_isnumber(L, 1) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg2 = (double)lua_tonumber(L, 1); -arg4 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg5 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 6, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 4); -} -else -{ -char 
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [double] DoubleTensor~1D [double] DoubleTensor~2D DoubleTensor~1D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_DoubleTensor_addmm(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-double arg2 = 1;
-THDoubleTensor *arg3 = NULL;
-double arg4 = 1;
-THDoubleTensor *arg5 = NULL;
-THDoubleTensor *arg6 = NULL;
-if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 2)
-)
-{
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 2)
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 4
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 2)
-)
-{
-arg2 = (double)lua_tonumber(L, 1);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 2)
-)
-{
-arg1_idx = 1;
-arg2 = (double)lua_tonumber(L, 2);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 2)
-)
-{
-arg4 = (double)lua_tonumber(L, 2);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 2)
-)
-{
-arg1_idx = 1;
-arg4 = (double)lua_tonumber(L, 3);
-}
-else if(narg == 5
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 2)
-)
-{
-arg2 = (double)lua_tonumber(L, 1);
-arg4 = (double)lua_tonumber(L, 3);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 4)
-&& (arg5 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg5->nDimension == 2)
-&& (arg6 = luaT_toudata(L, 6, "torch.DoubleTensor")) && (arg6->nDimension == 2)
-)
-{
-arg1_idx = 1;
-arg2 = (double)lua_tonumber(L, 2);
-arg4 = (double)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [double] DoubleTensor~2D [double] DoubleTensor~2D DoubleTensor~2D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_DoubleTensor_addr(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-double arg2 = 1;
-THDoubleTensor *arg3 = NULL;
-double arg4 = 1;
-THDoubleTensor *arg5 = NULL;
-THDoubleTensor *arg6 = NULL;
-if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 4
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 1)
-)
-{
-arg2 = (double)lua_tonumber(L, 1);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-arg2 = (double)lua_tonumber(L, 2);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 1)
-)
-{
-arg4 = (double)lua_tonumber(L, 2);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-arg4 = (double)lua_tonumber(L, 3);
-}
-else if(narg == 5
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 1)
-)
-{
-arg2 = (double)lua_tonumber(L, 1);
-arg4 = (double)lua_tonumber(L, 3);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 4)
-&& (arg5 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg5->nDimension == 1)
-&& (arg6 = luaT_toudata(L, 6, "torch.DoubleTensor")) && (arg6->nDimension == 1)
-)
-{
-arg1_idx = 1;
-arg2 = (double)lua_tonumber(L, 2);
-arg4 = (double)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [double] DoubleTensor~2D [double] DoubleTensor~1D DoubleTensor~1D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_DoubleTensor_addbmm(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-double arg2 = 1;
-THDoubleTensor *arg3 = NULL;
-double arg4 = 1;
-THDoubleTensor *arg5 = NULL;
-THDoubleTensor *arg6 = NULL;
-if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 4
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (double)lua_tonumber(L, 1);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (double)lua_tonumber(L, 2);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg4 = (double)lua_tonumber(L, 2);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg4 = (double)lua_tonumber(L, 3);
-}
-else if(narg == 5
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (double)lua_tonumber(L, 1);
-arg4 = (double)lua_tonumber(L, 3);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg3->nDimension == 2)
-&& lua_isnumber(L, 4)
-&& (arg5 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 6, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (double)lua_tonumber(L, 2);
-arg4 = (double)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [double] DoubleTensor~2D [double] DoubleTensor~3D DoubleTensor~3D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_DoubleTensor_baddbmm(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-double arg2 = 1;
-THDoubleTensor *arg3 = NULL;
-double arg4 = 1;
-THDoubleTensor *arg5 = NULL;
-THDoubleTensor *arg6 = NULL;
-if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 4
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (double)lua_tonumber(L, 1);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg3->nDimension == 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (double)lua_tonumber(L, 2);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 2)
-&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg4 = (double)lua_tonumber(L, 2);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg4 = (double)lua_tonumber(L, 3);
-}
-else if(narg == 5
-&& lua_isnumber(L, 1)
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 3)
-&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg2 = (double)lua_tonumber(L, 1);
-arg4 = (double)lua_tonumber(L, 3);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg3->nDimension == 3)
-&& lua_isnumber(L, 4)
-&& (arg5 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg5->nDimension == 3)
-&& (arg6 = luaT_toudata(L, 6, "torch.DoubleTensor")) && (arg6->nDimension == 3)
-)
-{
-arg1_idx = 1;
-arg2 = (double)lua_tonumber(L, 2);
-arg4 = (double)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [double] DoubleTensor~3D [double] DoubleTensor~3D DoubleTensor~3D", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6);
-return 1;
-}
-
-static int torch_DoubleTensor_numel(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-ptrdiff_t arg2 = 0;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor", type_buf);
-}
-arg2 = THDoubleTensor_numel(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-
-static int torch_DoubleTensor_cumsum(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2)-1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_cumsum(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_DoubleTensor_cumprod(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2)-1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_cumprod(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_DoubleTensor_sum(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-double arg2 = 0;
-THDoubleTensor *arg3 = NULL;
-int arg3_idx = 0;
-THDoubleTensor *arg4 = NULL;
-long arg5 = 0;
-int arg6 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg3 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] DoubleTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THDoubleTensor_sumall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.DoubleTensor");
-THDoubleTensor_sum(arg3,arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_prod(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-double arg2 = 0;
-THDoubleTensor *arg3 = NULL;
-int arg3_idx = 0;
-THDoubleTensor *arg4 = NULL;
-long arg5 = 0;
-int arg6 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg3 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] DoubleTensor index", type_buf);
-}
-if(argset == 1)
-{
-arg2 = THDoubleTensor_prodall(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg3_idx)
-lua_pushvalue(L, arg3_idx);
-else
-luaT_pushudata(L, arg3, "torch.DoubleTensor");
-THDoubleTensor_prod(arg3,arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_min(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-double arg2 = 0;
-THDoubleTensor *arg3 = NULL;
-int arg3_idx = 0;
-THLongTensor *arg4 = NULL;
-int arg4_idx = 0;
-THDoubleTensor *arg5 = NULL;
-long arg6 = 0;
-int arg7 = 1;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (long)lua_tonumber(L, 2)-1;
-arg3 = THDoubleTensor_new();
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg3_idx = 1;
-arg6 = (long)lua_tonumber(L, 3)-1;
-arg4 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor"))
"torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] [*LongTensor*] DoubleTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_minall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THDoubleTensor_min(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int torch_DoubleTensor_max(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] [*LongTensor*] DoubleTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_maxall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THDoubleTensor_max(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int torch_DoubleTensor_cmin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ 
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (double)lua_tonumber(L, 2);
-arg4 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (double)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor | [*DoubleTensor*] DoubleTensor double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_cmin(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.DoubleTensor");
-THDoubleTensor_cminValue(arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_cmax(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-THDoubleTensor *arg3 = NULL;
-THDoubleTensor *arg4 = NULL;
-int arg4_idx = 0;
-THDoubleTensor *arg5 = NULL;
-double arg6 = 0;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg6 = (double)lua_tonumber(L, 2);
-arg4 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg6 = (double)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor | [*DoubleTensor*] DoubleTensor double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_cmax(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.DoubleTensor");
-THDoubleTensor_cmaxValue(arg4,arg5,arg6);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_trace(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-double arg2 = 0;
-if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor", type_buf);
-}
-arg2 = THDoubleTensor_trace(arg1);
-lua_pushnumber(L, (lua_Number)arg2);
-return 1;
-}
-
-static int torch_DoubleTensor_cross(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-THDoubleTensor *arg3 = NULL;
-long arg4 = -1;
-if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor [index]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_cross(arg1,arg2,arg3,arg4);
-return 1;
-}
-
-static int torch_DoubleTensor_diag(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-arg1_idx = 1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [long]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_diag(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_DoubleTensor_eye(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-long arg2 = 0;
-long arg3 = 0;
-if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-arg2 = (long)lua_tonumber(L, 1);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg1_idx = 1;
-arg2 = (long)lua_tonumber(L, 2);
-}
-else if(narg == 2
-&& lua_isnumber(L, 1)
-&& lua_isnumber(L, 2)
-)
-{
-arg2 = (long)lua_tonumber(L, 1);
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg2 = (long)lua_tonumber(L, 2);
-arg3 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] long [long]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_eye(arg1,arg2,arg3);
-return 1;
-}
-
-static int torch_DoubleTensor_range(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-double arg2 = 0;
-double arg3 = 0;
-double arg4 = 1;
-if(narg == 2
-&& lua_isnumber(L, 1)
-&& lua_isnumber(L, 2)
-)
-{
-arg2 = (double)lua_tonumber(L, 1);
-arg3 = (double)lua_tonumber(L, 2);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg2 = (double)lua_tonumber(L, 2);
-arg3 = (double)lua_tonumber(L, 3);
-}
-else if(narg == 3
-&& lua_isnumber(L, 1)
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-arg2 = (double)lua_tonumber(L, 1);
-arg3 = (double)lua_tonumber(L, 2);
-arg4 = (double)lua_tonumber(L, 3);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2 = (double)lua_tonumber(L, 2);
-arg3 = (double)lua_tonumber(L, 3);
-arg4 = (double)lua_tonumber(L, 4);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] double double [double]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_range(arg1,arg2,arg3,arg4);
-return 1;
-}
-
-static int torch_DoubleTensor_randperm(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THGenerator *arg2 = NULL;
-long arg3 = 0;
-if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-arg3 = (long)lua_tonumber(L, 1);
-arg1 = THDoubleTensor_new();
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 2);
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, torch_Generator))
-&& lua_isnumber(L, 2)
-)
-{
-arg3 = (long)lua_tonumber(L, 2);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, torch_Generator))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = (long)lua_tonumber(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [Generator] long", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_randperm(arg1,arg2,arg3);
-
-THDoubleTensor_add(arg1, arg1, 1);
-return 1;
-}
-
-static int torch_DoubleTensor_sort(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-int arg2_idx = 0;
-THDoubleTensor *arg3 = NULL;
-long arg4 = 0;
-int arg5 = 0;
-if(narg == 1
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-arg4 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-arg1_idx = 1;
-arg2 = THLongTensor_new();
-arg4 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-arg2_idx = 1;
-arg1 = THDoubleTensor_new();
-arg4 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (long)lua_tonumber(L, 2)-1;
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isboolean(L, 2)
-)
-{
-arg5 = lua_toboolean(L, 2);
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-arg4 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg1_idx = 1;
-arg5 = lua_toboolean(L, 3);
-arg2 = THLongTensor_new();
-arg4 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg2_idx = 1;
-arg5 = lua_toboolean(L, 3);
-arg1 = THDoubleTensor_new();
-arg4 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = lua_toboolean(L, 4);
-arg4 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2)-1;
-arg5 = lua_toboolean(L, 3);
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg5 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3)-1;
-arg5 = lua_toboolean(L, 4);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4)-1;
-arg5 = lua_toboolean(L, 5);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [*LongTensor*] DoubleTensor [index] [boolean]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-if(arg2_idx)
-lua_pushvalue(L, arg2_idx);
-else
-luaT_pushudata(L, arg2, "torch.LongTensor");
-THDoubleTensor_sort(arg1,arg2,arg3,arg4,arg5);
-THLongTensor_add(arg2, arg2, 1);
-return 2;
-}
-
-static int torch_DoubleTensor_topk(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THLongTensor *arg2 = NULL;
-int arg2_idx = 0;
-THDoubleTensor *arg3 = NULL;
-long arg4 = 1;
-long arg5 = 0;
-int arg6 = 0;
-int arg7 = 0;
-if(narg == 1
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-arg1_idx = 1;
-arg2 = THLongTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-arg2_idx = 1;
-arg1 = THDoubleTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg2 = THLongTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg1 = THDoubleTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg2 = THLongTensor_new();
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) 
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg6 = lua_toboolean(L, 5);
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg6 = lua_toboolean(L, 3);
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-arg6 = lua_toboolean(L, 6);
-}
-else if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isboolean(L, 2)
-)
-{
-arg7 = lua_toboolean(L, 2);
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg1_idx = 1;
-arg7 = lua_toboolean(L, 3);
-arg2 = THLongTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isboolean(L, 3)
-)
-{
-arg2_idx = 1;
-arg7 = lua_toboolean(L, 3);
-arg1 = THDoubleTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg7 = lua_toboolean(L, 4);
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg7 = lua_toboolean(L, 3);
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg1 = THDoubleTensor_new();
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg5 = THDoubleTensor_nDimension(arg3)-1;
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg7 = lua_toboolean(L, 3);
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg7 = lua_toboolean(L, 4);
-arg2 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg7 = lua_toboolean(L, 4);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg7 = lua_toboolean(L, 5);
-}
-else if(narg == 4
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg7 = lua_toboolean(L, 4);
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg7 = lua_toboolean(L, 5);
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
"torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THDoubleTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg5 = (long)lua_tonumber(L, 2)-1;
-arg6 = lua_toboolean(L, 3);
-arg7 = lua_toboolean(L, 4);
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg1_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg2 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg2_idx = 1;
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg7 = lua_toboolean(L, 6);
-}
-else if(narg == 5
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-arg4 = (long)lua_tonumber(L, 2);
-arg5 = (long)lua_tonumber(L, 3)-1;
-arg6 = lua_toboolean(L, 4);
-arg7 = lua_toboolean(L, 5);
-arg1 = THDoubleTensor_new();
-arg2 = THLongTensor_new();
-}
-else if(narg == 6
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg1_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg7 = lua_toboolean(L, 6);
-arg2 = THLongTensor_new();
-}
-else if(narg == 6
-&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-&& lua_isboolean(L, 6)
-)
-{
-arg2_idx = 1;
-arg4 = (long)lua_tonumber(L, 3);
-arg5 = (long)lua_tonumber(L, 4)-1;
-arg6 = lua_toboolean(L, 5);
-arg7 = lua_toboolean(L, 6);
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 7
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isnumber(L, 5)
-&& lua_isboolean(L, 6)
-&& lua_isboolean(L, 7)
-)
-{
-arg1_idx = 1;
-arg2_idx = 2;
-arg4 = (long)lua_tonumber(L, 4);
-arg5 = (long)lua_tonumber(L, 5)-1;
-arg6 = lua_toboolean(L, 6);
-arg7 = lua_toboolean(L, 7);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [*LongTensor*] DoubleTensor [long] [index] [boolean] [boolean]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-if(arg2_idx)
-lua_pushvalue(L, arg2_idx);
-else
-luaT_pushudata(L, arg2, "torch.LongTensor");
-THDoubleTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7);
-THLongTensor_add(arg2, arg2, 1);
-return 2;
-}
-
-static int torch_DoubleTensor_kthvalue(lua_State *L)
-{
-int narg = lua_gettop(L);
lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [*LongTensor*] DoubleTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THDoubleTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_DoubleTensor_mode(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = 
THLongTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg2_idx = 1; -arg1 = THDoubleTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [*LongTensor*] DoubleTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THDoubleTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_DoubleTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg2_idx = 1; -arg1 = THDoubleTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else 
if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [*LongTensor*] DoubleTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THDoubleTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int torch_DoubleTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_tril(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_triu(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_triu(arg1,arg2,arg3); -return 1; -} - -static int torch_DoubleTensor_cat(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -long arg4 = -2; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THDoubleTensor 
**arg6_data = NULL; -long arg6_size = 0; -int arg6_i = 0; -long arg7 = -2; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 1 -&& torch_isnonemptytable(L, 1) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THDoubleTensor**)THAlloc(arg6_size * sizeof(THDoubleTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.DoubleTensor"))) - luaL_error(L, "expected DoubleTensor in tensor array"); - lua_pop(L, 1); -} - -arg5 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THDoubleTensor**)THAlloc(arg6_size * sizeof(THDoubleTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.DoubleTensor"))) - luaL_error(L, "expected DoubleTensor in tensor array"); - lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THDoubleTensor**)THAlloc(arg6_size * sizeof(THDoubleTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.DoubleTensor"))) - luaL_error(L, "expected DoubleTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THDoubleTensor**)THAlloc(arg6_size * sizeof(THDoubleTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.DoubleTensor"))) - luaL_error(L, "expected DoubleTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor [index] | [*DoubleTensor*] {DoubleTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) 
-lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.DoubleTensor"); -THDoubleTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static void THDoubleTensor_random2__(THDoubleTensor *self, THGenerator *gen, long a, long b) -{ - THArgCheck(b >= a, 2, "upper bound must be larger than lower bound"); - TH_TENSOR_APPLY(double, self, *self_data = ((THRandom_random(gen) % (b+1-a)) + a);) -} - -static void THDoubleTensor_random1__(THDoubleTensor *self, THGenerator *gen, long b) -{ - THArgCheck(b > 0, 1, "upper bound must be strictly positive"); - TH_TENSOR_APPLY(double, self, *self_data = (THRandom_random(gen) % b + 1);) -} - -static int torch_DoubleTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THDoubleTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THDoubleTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = (long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx 
= 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *DoubleTensor* [Generator] long long | *DoubleTensor* [Generator] long | *DoubleTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THDoubleTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THDoubleTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *DoubleTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THDoubleTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", 
torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *DoubleTensor* [Generator] [double] | *DoubleTensor* [Generator] FloatTensor | *DoubleTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THDoubleTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, 
"torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | [*DoubleTensor*] DoubleTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THDoubleTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -{int hasdims = arg4->nDimension > 1; -THDoubleTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THDoubleTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int torch_DoubleTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_sign(arg1,arg2); -return 1; -} - -static int torch_DoubleTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -double arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -double arg11 = 0; -double arg12 = 1; -THDoubleTensor *arg13 = NULL; -THDoubleTensor *arg14 = NULL; -double arg15 = 1; -double arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THDoubleTensor *arg19 = NULL; -int arg19_idx = 0; -double arg20 = 0; -double arg21 = 1; -THDoubleTensor *arg22 = NULL; -THDoubleTensor *arg23 = NULL; -double arg24 = 1; -double arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) 
&& (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THDoubleTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THDoubleTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THDoubleTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THDoubleTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected 
arguments: [*DoubleTensor*] DoubleTensor~2D DoubleTensor~2D [(V|F)] | [*DoubleTensor*] DoubleTensor~3D DoubleTensor~3D [(V|F)] | [*DoubleTensor*] DoubleTensor~3D DoubleTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.DoubleTensor"); -THDoubleTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.DoubleTensor"); -THDoubleTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -double arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -double arg11 = 0; -double arg12 = 1; -THDoubleTensor *arg13 = NULL; -THDoubleTensor *arg14 = NULL; -double arg15 = 1; -double arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THDoubleTensor *arg19 = NULL; -int arg19_idx = 0; -double arg20 = 0; -double arg21 = 1; -THDoubleTensor *arg22 = NULL; -THDoubleTensor *arg23 = NULL; -double arg24 = 1; -double arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THDoubleTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.DoubleTensor")) && 
(arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THDoubleTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THDoubleTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THDoubleTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor~2D DoubleTensor~2D [(V|F)] | [*DoubleTensor*] DoubleTensor~3D DoubleTensor~3D [(V|F)] | [*DoubleTensor*] DoubleTensor~3D DoubleTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.DoubleTensor"); -THDoubleTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.DoubleTensor"); -THDoubleTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -double arg7 = 1; -double arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const 
char *arg10 = NULL; -char arg10_default = 'C'; -THDoubleTensor *arg11 = NULL; -int arg11_idx = 0; -double arg12 = 0; -double arg13 = 1; -THDoubleTensor *arg14 = NULL; -THDoubleTensor *arg15 = NULL; -double arg16 = 1; -double arg17 = 1; -double arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THDoubleTensor *arg21 = NULL; -int arg21_idx = 0; -double arg22 = 0; -double arg23 = 1; -THDoubleTensor *arg24 = NULL; -THDoubleTensor *arg25 = NULL; -double arg26 = 1; -double arg27 = 1; -double arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THDoubleTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THDoubleTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THDoubleTensor_new(); -arg29 = &arg29_default; -arg30 = 
&arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THDoubleTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor~3D DoubleTensor~3D [(V|F)] | [*DoubleTensor*] DoubleTensor~4D DoubleTensor~4D [(V|F)] | [*DoubleTensor*] DoubleTensor~4D DoubleTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.DoubleTensor"); -THDoubleTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.DoubleTensor"); -THDoubleTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -double arg7 = 1; -double arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THDoubleTensor *arg11 = NULL; -int arg11_idx = 0; -double arg12 = 0; -double arg13 = 1; -THDoubleTensor *arg14 = NULL; -THDoubleTensor *arg15 = NULL; -double arg16 = 1; -double arg17 = 1; -double arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THDoubleTensor *arg21 = NULL; -int arg21_idx = 0; -double arg22 = 0; -double arg23 = 1; -THDoubleTensor *arg24 = NULL; -THDoubleTensor *arg25 = NULL; -double arg26 = 1; -double arg27 = 1; -double arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, 
"torch.DoubleTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THDoubleTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THDoubleTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THDoubleTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THDoubleTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid 
arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor~3D DoubleTensor~3D [(V|F)] | [*DoubleTensor*] DoubleTensor~4D DoubleTensor~4D [(V|F)] | [*DoubleTensor*] DoubleTensor~4D DoubleTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.DoubleTensor"); -THDoubleTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.DoubleTensor"); -THDoubleTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THDoubleTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int 
torch_DoubleTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THDoubleTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = 
luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THDoubleTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected 
arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THDoubleTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THDoubleTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; 
-THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THDoubleTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THDoubleTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int torch_DoubleTensor_mean(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& 
(arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] DoubleTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_meanall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -THDoubleTensor_mean(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_var(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg3 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] DoubleTensor index [boolean]", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_varall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -THDoubleTensor_var(arg3,arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_std(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else if(narg == 3 -&& 
(arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg3 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] DoubleTensor index [boolean]", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_stdall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -THDoubleTensor_std(arg3,arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_histc(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -long arg3 = 100; -double arg4 = 0; -double arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = 
(long)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -arg5 = (double)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [long] [double] [double]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_histc(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_DoubleTensor_bhistc(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -long arg3 = 100; -double arg4 = 0; -double arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, 
"torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -arg5 = (double)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [long] [double] [double]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_bhistc(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_DoubleTensor_norm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 2; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -long arg7 = 0; -int arg8 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg6 = (double)lua_tonumber(L, 2); -arg7 = (long)lua_tonumber(L, 3)-1; -arg4 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -arg7 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor [double] | [*DoubleTensor*] DoubleTensor double index", type_buf); -} -if(argset == 1) -{ -arg3 = THDoubleTensor_normall(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, 
"torch.DoubleTensor"); -THDoubleTensor_norm(arg4,arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_renorm(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -long arg4 = 0; -double arg5 = 0; -if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg3 = (double)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = (double)lua_tonumber(L, 4); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = (double)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double index double", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_renorm(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int torch_DoubleTensor_dist(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -THDoubleTensor *arg2 = NULL; -double arg3 = 2; -double arg4 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor DoubleTensor [double]", type_buf); -} -arg4 = THDoubleTensor_dist(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} - -static int torch_DoubleTensor_linspace(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 0; -long arg4 = 100; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] double double [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_linspace(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_DoubleTensor_logspace(lua_State *L) -{ -int narg = lua_gettop(L); 
-THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 0; -long arg4 = 100; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] double double [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_logspace(arg1,arg2,arg3,arg4); -return 1; -} - -static int torch_DoubleTensor_log(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_log(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = log(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_log1p(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_log1p(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = log1p(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_exp(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = 
NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_exp(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = exp(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_cos(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cos(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = cos(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_acos(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_acos(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = acos(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_cosh(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& 
lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cosh(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = cosh(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_sin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_sin(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = sin(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_asin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_asin(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = asin(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_sinh(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_sinh(arg1,arg2); 
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = sinh(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_tan(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_tan(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = tan(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_atan(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_atan(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = atan(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_tanh(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_tanh(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = tanh(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_sqrt(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_sqrt(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = sqrt(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_round(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_round(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = round(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_ceil(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_ceil(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = ceil(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_floor(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_floor(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = floor(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_trunc(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_trunc(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = trunc(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_abs(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_abs(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = fabs(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_frac(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_frac(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = TH_frac(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
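Every wrapper in this generated file follows one dispatch shape: count the stack with lua_gettop(), probe each overload with luaT_toudata()/lua_isnumber(), allocate a fresh result tensor when the caller did not pass one, push the result userdata, then call the TH kernel. A condensed, hand-written sketch of that shape for a single unary op follows; it is illustrative only (my_DoubleTensor_log is a hypothetical name, and the real generated code additionally reports the offending argument types via str_arg_types()):

#include <lua.h>
#include <lauxlib.h>
#include <math.h>
#include <luaT.h>
#include <TH/TH.h>

/* Sketch of the cwrap dispatch pattern, assuming torch7's luaT and TH
 * headers are available; not part of the generated file itself. */
static int my_DoubleTensor_log(lua_State *L)
{
  int narg = lua_gettop(L);
  THDoubleTensor *res = NULL, *src = NULL;

  if (narg == 1 && (src = luaT_toudata(L, 1, "torch.DoubleTensor"))) {
    /* torch.log(x): allocate the result tensor ourselves */
    res = THDoubleTensor_new();
    luaT_pushudata(L, res, "torch.DoubleTensor");
  }
  else if (narg == 2
           && (res = luaT_toudata(L, 1, "torch.DoubleTensor"))
           && (src = luaT_toudata(L, 2, "torch.DoubleTensor"))) {
    /* torch.log(res, x): write into the caller-supplied tensor and
     * return that same stack slot */
    lua_pushvalue(L, 1);
  }
  else if (narg == 1 && lua_isnumber(L, 1)) {
    /* scalar overload falls back to libm */
    lua_pushnumber(L, (lua_Number)log(lua_tonumber(L, 1)));
    return 1;
  }
  else {
    return luaL_error(L, "expected arguments: [*DoubleTensor*] DoubleTensor | double");
  }

  THDoubleTensor_log(res, src); /* element-wise kernel from TH */
  return 1;
}

The THLongTensor_add(arg1, arg1, -1) / THLongTensor_add(arg1, arg1, 1) pairs around index-producing kernels such as THDoubleTensor_nonzero() and THDoubleTensor_multinomial() elsewhere in this file serve a related purpose: they translate between Lua's 1-based indices and TH's 0-based ones on the way in and out.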
-
-static int torch_DoubleTensor_rsqrt(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_rsqrt(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = TH_rsqrt(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_sigmoid(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-double arg3 = 0;
-double arg4 = 0;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 1;
-arg1_idx = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 2;
-arg3 = (double)lua_tonumber(L, 1);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_sigmoid(arg1,arg2);
-return 1;
-}
-else if(argset == 2)
-{
-arg4 = TH_sigmoid(arg3);
-lua_pushnumber(L, (lua_Number)arg4);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_neg(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-arg1_idx = 1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_neg(arg1,arg2);
-return 1;
-}
-
-static int torch_DoubleTensor_cinv(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THDoubleTensor *arg2 = NULL;
-if(narg == 1
-&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-arg1 = THDoubleTensor_new();
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-arg1_idx = 1;
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_cinv(arg1,arg2); -return 1; -} - -static int torch_DoubleTensor_lerp(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -double arg4 = 0; -double arg5 = 0; -double arg6 = 0; -double arg7 = 0; -double arg8 = 0; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 4); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5 = (double)lua_tonumber(L, 1); -arg6 = (double)lua_tonumber(L, 2); -arg7 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor double | double double double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_lerp(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -arg8 = TH_lerp(arg5,arg6,arg7); -lua_pushnumber(L, (lua_Number)arg8); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_atan2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -double arg4 = 0; -double arg5 = 0; -double arg6 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4 = (double)lua_tonumber(L, 1); -arg5 = (double)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor | double double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_atan2(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -arg6 = atan2(arg4,arg5); -lua_pushnumber(L, (lua_Number)arg6); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_pow(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -double arg5 = 0; -THDoubleTensor *arg6 = NULL; -double arg7 = 0; -double arg8 = 0; -double arg9 = 0; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 
3)
-)
-{
-argset = 1;
-arg1_idx = 1;
-arg3 = (double)lua_tonumber(L, 3);
-}
-else if(narg == 2
-&& lua_isnumber(L, 1)
-&& (arg6 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-)
-{
-argset = 2;
-arg5 = (double)lua_tonumber(L, 1);
-arg4 = THDoubleTensor_new();
-}
-else if(narg == 3
-&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-)
-{
-argset = 2;
-arg4_idx = 1;
-arg5 = (double)lua_tonumber(L, 2);
-}
-else if(narg == 2
-&& lua_isnumber(L, 1)
-&& lua_isnumber(L, 2)
-)
-{
-argset = 3;
-arg7 = (double)lua_tonumber(L, 1);
-arg8 = (double)lua_tonumber(L, 2);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor double | [*DoubleTensor*] double DoubleTensor | double double", type_buf);
-}
-if(argset == 1)
-{
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_pow(arg1,arg2,arg3);
-return 1;
-}
-else if(argset == 2)
-{
-if(arg4_idx)
-lua_pushvalue(L, arg4_idx);
-else
-luaT_pushudata(L, arg4, "torch.DoubleTensor");
-THDoubleTensor_tpow(arg4,arg5,arg6);
-return 1;
-}
-else if(argset == 3)
-{
-arg9 = pow(arg7,arg8);
-lua_pushnumber(L, (lua_Number)arg9);
-return 1;
-}
-return 0;
-}
-
-static int torch_DoubleTensor_rand(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THGenerator *arg2 = NULL;
-THLongStorage *arg3 = NULL;
-if(narg >= 1
-&& torch_islongargs(L, 1)
-)
-{
-arg3 = torch_checklongargs(L, 1);
-arg1 = THDoubleTensor_new();
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg >= 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& torch_islongargs(L, 2)
-)
-{
-arg1_idx = 1;
-arg3 = torch_checklongargs(L, 2);
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg >= 2
-&& (arg2 = luaT_toudata(L, 1, torch_Generator))
-&& torch_islongargs(L, 2)
-)
-{
-arg3 = torch_checklongargs(L, 2);
-arg1 = THDoubleTensor_new();
-}
-else if(narg >= 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, torch_Generator))
-&& torch_islongargs(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = torch_checklongargs(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [Generator] (LongStorage | dim1 [dim2...])", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_rand(arg1,arg2,arg3);
-THLongStorage_free(arg3);
-return 1;
-}
-
-static int torch_DoubleTensor_randn(lua_State *L)
-{
-int narg = lua_gettop(L);
-THDoubleTensor *arg1 = NULL;
-int arg1_idx = 0;
-THGenerator *arg2 = NULL;
-THLongStorage *arg3 = NULL;
-if(narg >= 1
-&& torch_islongargs(L, 1)
-)
-{
-arg3 = torch_checklongargs(L, 1);
-arg1 = THDoubleTensor_new();
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg >= 2
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& torch_islongargs(L, 2)
-)
-{
-arg1_idx = 1;
-arg3 = torch_checklongargs(L, 2);
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg >= 2
-&& (arg2 = luaT_toudata(L, 1, torch_Generator))
-&& torch_islongargs(L, 2)
-)
-{
-arg3 = torch_checklongargs(L, 2);
-arg1 = THDoubleTensor_new();
-}
-else if(narg >= 3
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg2 = luaT_toudata(L, 2, torch_Generator))
-&& torch_islongargs(L, 3)
-)
-{
-arg1_idx = 1;
-arg3 = torch_checklongargs(L, 3);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [Generator] (LongStorage | dim1 [dim2...])", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.DoubleTensor");
-THDoubleTensor_randn(arg1,arg2,arg3);
-THLongStorage_free(arg3);
-return 1;
-}
-
-static int torch_DoubleTensor_multinomial(lua_State *L)
-{
-int narg = lua_gettop(L);
-THLongTensor *arg1 = NULL;
-int arg1_idx = 0;
-THGenerator *arg2 = NULL;
-THDoubleTensor *arg3 = NULL;
-int arg4 = 0;
-int arg5 = 0;
-if(narg == 2
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-arg4 = (int)lua_tonumber(L, 2);
-arg1 = THLongTensor_new();
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-THLongTensor_add(arg1, arg1, -1);
-arg1_idx = 1;
-arg4 = (int)lua_tonumber(L, 3);
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 3
-&& (arg2 = luaT_toudata(L, 1, torch_Generator))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-)
-{
-arg4 = (int)lua_tonumber(L, 3);
-arg1 = THLongTensor_new();
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg2 = luaT_toudata(L, 2, torch_Generator))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-)
-{
-THLongTensor_add(arg1, arg1, -1);
-arg1_idx = 1;
-arg4 = (int)lua_tonumber(L, 4);
-}
-else if(narg == 3
-&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isboolean(L, 3)
-)
-{
-arg4 = (int)lua_tonumber(L, 2);
-arg5 = lua_toboolean(L, 3);
-arg1 = THLongTensor_new();
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 4
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-THLongTensor_add(arg1, arg1, -1);
-arg1_idx = 1;
-arg4 = (int)lua_tonumber(L, 3);
-arg5 = lua_toboolean(L, 4);
-lua_getglobal(L,"torch");
-arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 4
-&& (arg2 = luaT_toudata(L, 1, torch_Generator))
-&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor"))
-&& lua_isnumber(L, 3)
-&& lua_isboolean(L, 4)
-)
-{
-arg4 = (int)lua_tonumber(L, 3);
-arg5 = lua_toboolean(L, 4);
-arg1 = THLongTensor_new();
-}
-else if(narg == 5
-&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor"))
-&& (arg2 = luaT_toudata(L, 2, torch_Generator))
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor"))
-&& lua_isnumber(L, 4)
-&& lua_isboolean(L, 5)
-)
-{
-THLongTensor_add(arg1, arg1, -1);
-arg1_idx = 1;
-arg4 = (int)lua_tonumber(L, 4);
-arg5 = lua_toboolean(L, 5);
-}
-else
-{
-char type_buf[512];
-str_arg_types(L, type_buf, 512);
-luaL_error(L, "invalid arguments: %s\nexpected arguments: [*LongTensor*] [Generator] DoubleTensor int [boolean]", type_buf);
-}
-if(arg1_idx)
-lua_pushvalue(L, arg1_idx);
-else
-luaT_pushudata(L, arg1, "torch.LongTensor");
-THDoubleTensor_multinomial(arg1,arg2,arg3,arg4,arg5);
-THLongTensor_add(arg1, arg1, 1);
-return 1;
-}
-
-static int torch_DoubleTensor_uniform(lua_State *L)
-{
-int narg = lua_gettop(L);
-int argset = 0;
-THGenerator *arg1 = NULL;
-double arg2 = 0;
-double arg3 = 1;
-double arg4 = 0;
-THDoubleTensor *arg5 = NULL;
-int arg5_idx = 0;
-THGenerator *arg6 = NULL;
-double arg7 = 0;
-double arg8 = 1;
-if(narg == 0
-)
-{
-argset = 1;
-lua_getglobal(L,"torch");
-arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 1
-&& (arg1 = luaT_toudata(L, 1, torch_Generator))
-)
-{
-argset = 1;
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 1;
-arg2 = (double)lua_tonumber(L, 1);
-lua_getglobal(L,"torch");
-arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, torch_Generator))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 1;
-arg2 = (double)lua_tonumber(L, 2);
-}
-else if(narg == 1
-&& lua_isnumber(L, 1)
-)
-{
-argset = 1;
-arg3 = (double)lua_tonumber(L, 1);
-lua_getglobal(L,"torch");
-arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg1 = luaT_toudata(L, 1, torch_Generator))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 1;
-arg3 = (double)lua_tonumber(L, 2);
-}
-else if(narg == 2
-&& lua_isnumber(L, 1)
-&& lua_isnumber(L, 2)
-)
-{
-argset = 1;
-arg2 = (double)lua_tonumber(L, 1);
-arg3 = (double)lua_tonumber(L, 2);
-lua_getglobal(L,"torch");
-arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 3
-&& (arg1 = luaT_toudata(L, 1, torch_Generator))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-argset = 1;
-arg2 = (double)lua_tonumber(L, 2);
-arg3 = (double)lua_tonumber(L, 3);
-}
-else if(narg == 1
-&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-)
-{
-argset = 2;
-arg5_idx = 1;
-lua_getglobal(L,"torch");
-arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg6 = luaT_toudata(L, 2, torch_Generator))
-)
-{
-argset = 2;
-arg5_idx = 1;
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg5_idx = 1;
-arg7 = (double)lua_tonumber(L, 2);
-lua_getglobal(L,"torch");
-arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 3
-&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg6 = luaT_toudata(L, 2, torch_Generator))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg5_idx = 1;
-arg7 = (double)lua_tonumber(L, 3);
-}
-else if(narg == 2
-&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-)
-{
-argset = 2;
-arg5_idx = 1;
-arg8 = (double)lua_tonumber(L, 2);
-lua_getglobal(L,"torch");
-arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);
-lua_pop(L, 2);
-}
-else if(narg == 3
-&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& (arg6 = luaT_toudata(L, 2, torch_Generator))
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg5_idx = 1;
-arg8 = (double)lua_tonumber(L, 3);
-}
-else if(narg == 3
-&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor"))
-&& lua_isnumber(L, 2)
-&& lua_isnumber(L, 3)
-)
-{
-argset = 2;
-arg5_idx = 1;
-arg7 = (double)lua_tonumber(L, 2);
-arg8 = (double)lua_tonumber(L, 3);
-lua_getglobal(L,"torch");
-arg6 = luaT_getfieldcheckudata(L, -1, "_gen", 
torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -arg8 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *DoubleTensor* [Generator] [double] [double]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_uniform(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THDoubleTensor_uniform(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_normal(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 1; -double arg4 = 0; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -double arg7 = 0; -double arg8 = 1; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); 
-arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -arg8 = (double)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -arg8 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *DoubleTensor* [Generator] [double] [double]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_normal(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THDoubleTensor_normal(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_cauchy(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 1; -double arg4 = 0; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -double arg7 = 0; -double arg8 = 1; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& 
lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -arg8 = (double)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -arg8 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *DoubleTensor* [Generator] [double] [double]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_cauchy(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THDoubleTensor_cauchy(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_logNormal(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 1; -double arg3 = 2; -double arg4 = 0; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -double arg7 = 1; -double arg8 = 2; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, 
torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -arg8 = (double)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -arg8 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *DoubleTensor* [Generator] [double] [double]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_logNormal(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THDoubleTensor_logNormal(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_exponential(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, 
"torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *DoubleTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_exponential(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_exponential(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_gesv(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THDoubleTensor *arg6 = NULL; -int arg6_idx = 0; -THDoubleTensor *arg7 = NULL; -THDoubleTensor *arg8 = NULL; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 2; -arg5 = THDoubleTensor_new(); -arg6 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* *DoubleTensor* DoubleTensor DoubleTensor | DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THDoubleTensor_gesv(arg1,arg2,arg3,arg4); -return 2; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.DoubleTensor"); -if(arg6_idx) -lua_pushvalue(L, arg6_idx); -else -luaT_pushudata(L, arg6, "torch.DoubleTensor"); -THDoubleTensor_gesv(arg5,arg6,arg7,arg8); -return 2; -} -return 0; -} - -static int torch_DoubleTensor_gels(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THDoubleTensor *arg6 = NULL; -int arg6_idx = 0; -THDoubleTensor *arg7 = NULL; -THDoubleTensor *arg8 = NULL; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 2; -arg5 = THDoubleTensor_new(); -arg6 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* *DoubleTensor* DoubleTensor DoubleTensor | DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THDoubleTensor_gels(arg1,arg2,arg3,arg4); -return 2; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.DoubleTensor"); 
-if(arg6_idx) -lua_pushvalue(L, arg6_idx); -else -luaT_pushudata(L, arg6, "torch.DoubleTensor"); -THDoubleTensor_gels(arg5,arg6,arg7,arg8); -return 2; -} -return 0; -} - -static int torch_DoubleTensor_trtrs(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -const char *arg5 = NULL; -char arg5_default = 'U'; -const char *arg6 = NULL; -char arg6_default = 'N'; -const char *arg7 = NULL; -char arg7_default = 'N'; -THDoubleTensor *arg8 = NULL; -int arg8_idx = 0; -THDoubleTensor *arg9 = NULL; -int arg9_idx = 0; -THDoubleTensor *arg10 = NULL; -THDoubleTensor *arg11 = NULL; -const char *arg12 = NULL; -char arg12_default = 'U'; -const char *arg13 = NULL; -char arg13_default = 'N'; -const char *arg14 = NULL; -char arg14_default = 'N'; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = &arg5_default; -arg6 = &arg6_default; -arg7 = &arg7_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'U' || *arg5 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg6 = &arg6_default; -arg7 = &arg7_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg6 = lua_tostring(L, 5)) && (*arg6 == 'N' || *arg6 == 'T') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = &arg5_default; -arg7 = &arg7_default; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'U' || *arg5 == 'L') -&& (arg6 = lua_tostring(L, 6)) && (*arg6 == 'N' || *arg6 == 'T') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg7 = &arg7_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg7 = lua_tostring(L, 5)) && (*arg7 == 'N' || *arg7 == 'U') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = &arg5_default; -arg6 = &arg6_default; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'U' || *arg5 == 'L') -&& (arg7 = lua_tostring(L, 6)) && (*arg7 == 'N' || *arg7 == 'U') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg6 = &arg6_default; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, 
"torch.DoubleTensor")) -&& (arg6 = lua_tostring(L, 5)) && (*arg6 == 'N' || *arg6 == 'T') -&& (arg7 = lua_tostring(L, 6)) && (*arg7 == 'N' || *arg7 == 'U') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = &arg5_default; -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'U' || *arg5 == 'L') -&& (arg6 = lua_tostring(L, 6)) && (*arg6 == 'N' || *arg6 == 'T') -&& (arg7 = lua_tostring(L, 7)) && (*arg7 == 'N' || *arg7 == 'U') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 2; -arg8 = THDoubleTensor_new(); -arg9 = THDoubleTensor_new(); -arg12 = &arg12_default; -arg13 = &arg13_default; -arg14 = &arg14_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = lua_tostring(L, 3)) && (*arg12 == 'U' || *arg12 == 'L') -) -{ -argset = 2; -arg8 = THDoubleTensor_new(); -arg9 = THDoubleTensor_new(); -arg13 = &arg13_default; -arg14 = &arg14_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg13 = lua_tostring(L, 3)) && (*arg13 == 'N' || *arg13 == 'T') -) -{ -argset = 2; -arg8 = THDoubleTensor_new(); -arg9 = THDoubleTensor_new(); -arg12 = &arg12_default; -arg14 = &arg14_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = lua_tostring(L, 3)) && (*arg12 == 'U' || *arg12 == 'L') -&& (arg13 = lua_tostring(L, 4)) && (*arg13 == 'N' || *arg13 == 'T') -) -{ -argset = 2; -arg8 = THDoubleTensor_new(); -arg9 = THDoubleTensor_new(); -arg14 = &arg14_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg14 = lua_tostring(L, 3)) && (*arg14 == 'N' || *arg14 == 'U') -) -{ -argset = 2; -arg8 = THDoubleTensor_new(); -arg9 = THDoubleTensor_new(); -arg12 = &arg12_default; -arg13 = &arg13_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = lua_tostring(L, 3)) && (*arg12 == 'U' || *arg12 == 'L') -&& (arg14 = lua_tostring(L, 4)) && (*arg14 == 'N' || *arg14 == 'U') -) -{ -argset = 2; -arg8 = THDoubleTensor_new(); -arg9 = THDoubleTensor_new(); -arg13 = &arg13_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg13 = lua_tostring(L, 3)) && (*arg13 == 'N' || *arg13 == 'T') -&& (arg14 = lua_tostring(L, 4)) && (*arg14 == 'N' || *arg14 == 'U') -) -{ -argset = 2; -arg8 = THDoubleTensor_new(); -arg9 = THDoubleTensor_new(); -arg12 = &arg12_default; -} -else if(narg == 5 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = lua_tostring(L, 3)) && (*arg12 == 'U' || *arg12 == 'L') -&& (arg13 = lua_tostring(L, 4)) && (*arg13 == 'N' || *arg13 == 'T') -&& (arg14 = lua_tostring(L, 5)) && (*arg14 == 'N' || *arg14 == 'U') -) -{ -argset = 2; -arg8 = THDoubleTensor_new(); -arg9 = THDoubleTensor_new(); -} -else -{ -char 
type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* *DoubleTensor* DoubleTensor DoubleTensor [(U|L)] [(N|T)] [(N|U)] | DoubleTensor DoubleTensor [(U|L)] [(N|T)] [(N|U)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THDoubleTensor_trtrs(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -return 2; -} -else if(argset == 2) -{ -if(arg8_idx) -lua_pushvalue(L, arg8_idx); -else -luaT_pushudata(L, arg8, "torch.DoubleTensor"); -if(arg9_idx) -lua_pushvalue(L, arg9_idx); -else -luaT_pushudata(L, arg9, "torch.DoubleTensor"); -THDoubleTensor_trtrs(arg8,arg9,arg10,arg11,arg12,arg13,arg14); -return 2; -} -return 0; -} - -static int torch_DoubleTensor_symeig(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -const char *arg4 = NULL; -char arg4_default = 'N'; -const char *arg5 = NULL; -char arg5_default = 'U'; -THDoubleTensor *arg6 = NULL; -int arg6_idx = 0; -THDoubleTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -const char *arg9 = NULL; -char arg9_default = 'N'; -const char *arg10 = NULL; -char arg10_default = 'U'; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg4 = &arg4_default; -arg5 = &arg5_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'N' || *arg4 == 'V') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = &arg5_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg5 = lua_tostring(L, 4)) && (*arg5 == 'U' || *arg5 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg4 = &arg4_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'N' || *arg4 == 'V') -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'U' || *arg5 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg6 = THDoubleTensor_new(); -arg7 = THDoubleTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = lua_tostring(L, 2)) && (*arg9 == 'N' || *arg9 == 'V') -) -{ -argset = 2; -arg6 = THDoubleTensor_new(); -arg7 = THDoubleTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg10 = lua_tostring(L, 2)) && (*arg10 == 'U' || *arg10 == 'L') -) -{ -argset = 2; -arg6 = THDoubleTensor_new(); -arg7 = THDoubleTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = lua_tostring(L, 2)) && (*arg9 == 'N' || *arg9 == 'V') -&& (arg10 = lua_tostring(L, 3)) && (*arg10 == 'U' || *arg10 == 'L') -) -{ -argset = 2; -arg6 = THDoubleTensor_new(); -arg7 = THDoubleTensor_new(); -} 
-else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* *DoubleTensor* DoubleTensor [(N|V)] [(U|L)] | DoubleTensor [(N|V)] [(U|L)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THDoubleTensor_syev(arg1,arg2,arg3,arg4,arg5); -return 2; -} -else if(argset == 2) -{ -if(arg6_idx) -lua_pushvalue(L, arg6_idx); -else -luaT_pushudata(L, arg6, "torch.DoubleTensor"); -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.DoubleTensor"); -THDoubleTensor_syev(arg6,arg7,arg8,arg9,arg10); -return 2; -} -return 0; -} - -static int torch_DoubleTensor_eig(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -const char *arg4 = NULL; -char arg4_default = 'N'; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THDoubleTensor *arg6 = NULL; -int arg6_idx = 0; -THDoubleTensor *arg7 = NULL; -const char *arg8 = NULL; -char arg8_default = 'N'; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg4 = &arg4_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'N' || *arg4 == 'V') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 1 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg5 = THDoubleTensor_new(); -arg6 = THDoubleTensor_new(); -arg8 = &arg8_default; -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg8 = lua_tostring(L, 2)) && (*arg8 == 'N' || *arg8 == 'V') -) -{ -argset = 2; -arg5 = THDoubleTensor_new(); -arg6 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* *DoubleTensor* DoubleTensor [(N|V)] | DoubleTensor [(N|V)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THDoubleTensor_geev(arg1,arg2,arg3,arg4); -return 2; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.DoubleTensor"); -if(arg6_idx) -lua_pushvalue(L, arg6_idx); -else -luaT_pushudata(L, arg6, "torch.DoubleTensor"); -THDoubleTensor_geev(arg5,arg6,arg7,arg8); -return 2; -} -return 0; -} - -static int torch_DoubleTensor_svd(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -const char *arg5 = NULL; -char arg5_default = 'S'; -THDoubleTensor *arg6 = NULL; -int arg6_idx = 0; -THDoubleTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -int arg8_idx = 0; -THDoubleTensor *arg9 = NULL; -const char *arg10 = NULL; -char arg10_default = 'S'; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; 
-arg3_idx = 3; -arg5 = &arg5_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'A' || *arg5 == 'S') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg3_idx = 3; -} -else if(narg == 1 -&& (arg9 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg6 = THDoubleTensor_new(); -arg7 = THDoubleTensor_new(); -arg8 = THDoubleTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg9 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg10 = lua_tostring(L, 2)) && (*arg10 == 'A' || *arg10 == 'S') -) -{ -argset = 2; -arg6 = THDoubleTensor_new(); -arg7 = THDoubleTensor_new(); -arg8 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* *DoubleTensor* *DoubleTensor* DoubleTensor [(A|S)] | DoubleTensor [(A|S)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -lua_pushvalue(L, arg3_idx); -THDoubleTensor_gesvd(arg1,arg2,arg3,arg4,arg5); -return 3; -} -else if(argset == 2) -{ -if(arg6_idx) -lua_pushvalue(L, arg6_idx); -else -luaT_pushudata(L, arg6, "torch.DoubleTensor"); -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.DoubleTensor"); -if(arg8_idx) -lua_pushvalue(L, arg8_idx); -else -luaT_pushudata(L, arg8, "torch.DoubleTensor"); -THDoubleTensor_gesvd(arg6,arg7,arg8,arg9,arg10); -return 3; -} -return 0; -} - -static int torch_DoubleTensor_inverse(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg3 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* DoubleTensor | DoubleTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_getri(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -THDoubleTensor_getri(arg3,arg4); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_potrf(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -const char *arg3 = NULL; -char arg3_default = 'U'; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -const char *arg6 = NULL; -char arg6_default = 'U'; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = &arg3_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = lua_tostring(L, 3)) && (*arg3 == 'U' || *arg3 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4 = 
THDoubleTensor_new(); -arg6 = &arg6_default; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = lua_tostring(L, 2)) && (*arg6 == 'U' || *arg6 == 'L') -) -{ -argset = 2; -arg4 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* DoubleTensor [(U|L)] | DoubleTensor [(U|L)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_potrf(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.DoubleTensor"); -THDoubleTensor_potrf(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_potrs(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -const char *arg4 = NULL; -char arg4_default = 'U'; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THDoubleTensor *arg6 = NULL; -THDoubleTensor *arg7 = NULL; -const char *arg8 = NULL; -char arg8_default = 'U'; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = &arg4_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'U' || *arg4 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg6 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 2; -arg5 = THDoubleTensor_new(); -arg8 = &arg8_default; -} -else if(narg == 3 -&& (arg6 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'U' || *arg8 == 'L') -) -{ -argset = 2; -arg5 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* DoubleTensor DoubleTensor [(U|L)] | DoubleTensor DoubleTensor [(U|L)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_potrs(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.DoubleTensor"); -THDoubleTensor_potrs(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_potri(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -const char *arg3 = NULL; -char arg3_default = 'U'; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -const char *arg6 = NULL; -char arg6_default = 'U'; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = &arg3_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = lua_tostring(L, 3)) && (*arg3 == 'U' || *arg3 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4 = THDoubleTensor_new(); -arg6 = 
&arg6_default; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = lua_tostring(L, 2)) && (*arg6 == 'U' || *arg6 == 'L') -) -{ -argset = 2; -arg4 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* DoubleTensor [(U|L)] | DoubleTensor [(U|L)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_potri(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.DoubleTensor"); -THDoubleTensor_potri(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_pstrf(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THIntTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -const char *arg4 = NULL; -char arg4_default = 'U'; -double arg5 = -1; -THDoubleTensor *arg6 = NULL; -int arg6_idx = 0; -THIntTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -const char *arg9 = NULL; -char arg9_default = 'U'; -double arg10 = -1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg4 = &arg4_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'U' || *arg4 == 'L') -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = (double)lua_tonumber(L, 4); -arg4 = &arg4_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.IntTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = lua_tostring(L, 4)) && (*arg4 == 'U' || *arg4 == 'L') -&& lua_isnumber(L, 5) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -arg5 = (double)lua_tonumber(L, 5); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg6 = THDoubleTensor_new(); -arg7 = THIntTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = lua_tostring(L, 2)) && (*arg9 == 'U' || *arg9 == 'L') -) -{ -argset = 2; -arg6 = THDoubleTensor_new(); -arg7 = THIntTensor_new(); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg10 = (double)lua_tonumber(L, 2); -arg6 = THDoubleTensor_new(); -arg7 = THIntTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = lua_tostring(L, 2)) && (*arg9 == 'U' || *arg9 == 'L') -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg10 = (double)lua_tonumber(L, 3); -arg6 = THDoubleTensor_new(); -arg7 = THIntTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* *IntTensor* DoubleTensor [(U|L)] [double] | DoubleTensor [(U|L)] [double]", type_buf); -} -if(argset == 1) 
-{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THDoubleTensor_pstrf(arg1,arg2,arg3,arg4,arg5); -return 2; -} -else if(argset == 2) -{ -if(arg6_idx) -lua_pushvalue(L, arg6_idx); -else -luaT_pushudata(L, arg6, "torch.DoubleTensor"); -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.IntTensor"); -THDoubleTensor_pstrf(arg6,arg7,arg8,arg9,arg10); -return 2; -} -return 0; -} - -static int torch_DoubleTensor_qr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THDoubleTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 1 -&& (arg6 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4 = THDoubleTensor_new(); -arg5 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* *DoubleTensor* DoubleTensor | DoubleTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THDoubleTensor_qr(arg1,arg2,arg3); -return 2; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.DoubleTensor"); -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.DoubleTensor"); -THDoubleTensor_qr(arg4,arg5,arg6); -return 2; -} -return 0; -} - -static int torch_DoubleTensor_geqrf(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THDoubleTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2_idx = 2; -} -else if(narg == 1 -&& (arg6 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4 = THDoubleTensor_new(); -arg5 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* *DoubleTensor* DoubleTensor | DoubleTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -lua_pushvalue(L, arg2_idx); -THDoubleTensor_geqrf(arg1,arg2,arg3); -return 2; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.DoubleTensor"); -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.DoubleTensor"); -THDoubleTensor_geqrf(arg4,arg5,arg6); -return 2; -} -return 0; -} - -static int torch_DoubleTensor_orgqr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) 
-&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* DoubleTensor DoubleTensor | DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_orgqr(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.DoubleTensor"); -THDoubleTensor_orgqr(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int torch_DoubleTensor_ormqr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -const char *arg5 = NULL; -char arg5_default = 'L'; -const char *arg6 = NULL; -char arg6_default = 'N'; -THDoubleTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -const char *arg11 = NULL; -char arg11_default = 'L'; -const char *arg12 = NULL; -char arg12_default = 'N'; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg5 = &arg5_default; -arg6 = &arg6_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'L' || *arg5 == 'R') -) -{ -argset = 1; -arg1_idx = 1; -arg6 = &arg6_default; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg6 = lua_tostring(L, 5)) && (*arg6 == 'N' || *arg6 == 'T') -) -{ -argset = 1; -arg1_idx = 1; -arg5 = &arg5_default; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg5 = lua_tostring(L, 5)) && (*arg5 == 'L' || *arg5 == 'R') -&& (arg6 = lua_tostring(L, 6)) && (*arg6 == 'N' || *arg6 == 'T') -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg10 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 2; -arg7 = THDoubleTensor_new(); -arg11 = &arg11_default; -arg12 = &arg12_default; -} -else if(narg == 4 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg10 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg11 = lua_tostring(L, 4)) && (*arg11 == 'L' || *arg11 == 'R') -) -{ -argset = 2; -arg7 = THDoubleTensor_new(); -arg12 = &arg12_default; -} -else if(narg == 4 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg10 = 
luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg12 = lua_tostring(L, 4)) && (*arg12 == 'N' || *arg12 == 'T') -) -{ -argset = 2; -arg7 = THDoubleTensor_new(); -arg11 = &arg11_default; -} -else if(narg == 5 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg10 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg11 = lua_tostring(L, 4)) && (*arg11 == 'L' || *arg11 == 'R') -&& (arg12 = lua_tostring(L, 5)) && (*arg12 == 'N' || *arg12 == 'T') -) -{ -argset = 2; -arg7 = THDoubleTensor_new(); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* DoubleTensor DoubleTensor DoubleTensor [(L|R)] [(N|T)] | DoubleTensor DoubleTensor DoubleTensor [(L|R)] [(N|T)]", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_ormqr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.DoubleTensor"); -THDoubleTensor_ormqr(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_zero(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor*", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_zero(arg1); -return 1; -} - -static int m_torch_DoubleTensor_fill(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_fill(arg1,arg2); -return 1; -} - -static int m_torch_DoubleTensor_zeros(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_zeros(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_DoubleTensor_ones(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongStorage *arg2 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg2 = torch_checklongargs(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_ones(arg1,arg2); -THLongStorage_free(arg2); -return 1; -} - -static int m_torch_DoubleTensor_reshape(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg 
>= 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_islongargs(L, 2) -) -{ -arg3 = torch_checklongargs(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor (LongStorage | dim1 [dim2...])", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_reshape(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int m_torch_DoubleTensor_gather(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -long arg3 = 0; -THLongTensor *arg4 = NULL; -if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.LongTensor")) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THDoubleTensor_new(); -THLongStorage* arg1_size = THLongTensor_newSizeOf(arg4); -THDoubleTensor_resize(arg1, arg1_size, NULL); -THLongStorage_free(arg1_size); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.LongTensor")) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor index LongTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_gather(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_DoubleTensor_scatter(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -THLongTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -long arg6 = 0; -THLongTensor *arg7 = NULL; -double arg8 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg3 = luaT_toudata(L, 3, "torch.LongTensor")) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2)-1; -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.LongTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg6 = (long)lua_tonumber(L, 2)-1; -arg8 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* index LongTensor DoubleTensor | *DoubleTensor* index LongTensor double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_scatter(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THDoubleTensor_scatterFill(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_dot(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, 
"torch.DoubleTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor DoubleTensor", type_buf); -} -arg3 = THDoubleTensor_dot(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} - -static int m_torch_DoubleTensor_equal(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -THDoubleTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor DoubleTensor", type_buf); -} -arg3 = THDoubleTensor_equal(arg1,arg2); -lua_pushboolean(L, arg3); -return 1; -} - -static int m_torch_DoubleTensor_add(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -THDoubleTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double | *DoubleTensor* [DoubleTensor] [double] DoubleTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_add(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_cadd(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_csub(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -THDoubleTensor *arg7 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& 
lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg7 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg7 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg7 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& (arg7 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double | *DoubleTensor* [DoubleTensor] [double] DoubleTensor", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_sub(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_csub(arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_mul(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_mul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_div(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_div(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_lshift(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) 
-&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_lshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_rshift(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_rshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_fmod(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_remainder(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_remainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_bitand(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double", type_buf); -} -lua_pushvalue(L, 
arg1_idx); -THDoubleTensor_bitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_bitor(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_bitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_bitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_bitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_mod(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_fmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_clamp(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_clamp(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_DoubleTensor_match(lua_State *L) -{ -int narg = 
lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* DoubleTensor DoubleTensor [double]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_match(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_DoubleTensor_cmul(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cmul(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_cpow(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cpow(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_cdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cdiv(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_clshift(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_clshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_crshift(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_crshift(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_cfmod(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_cremainder(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cremainder(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_cbitand(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = 
luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cbitand(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_cbitor(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cbitor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_cbitxor(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cbitxor(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_cmod(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cfmod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_addcmul(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) 
-) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] [double] DoubleTensor DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_addcmul(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_DoubleTensor_addcdiv(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg4 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& (arg4 = luaT_toudata(L, 4, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 5, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] [double] DoubleTensor DoubleTensor", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_addcdiv(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_DoubleTensor_mv(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* DoubleTensor~2D DoubleTensor~1D", type_buf); -} -THDoubleTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THDoubleTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_DoubleTensor_mm(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -double 
arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 2) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* DoubleTensor~2D DoubleTensor~2D", type_buf); -} -THDoubleTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THDoubleTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_DoubleTensor_bmm(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 3) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* DoubleTensor~3D DoubleTensor~3D", type_buf); -} -THDoubleTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THDoubleTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_DoubleTensor_ger(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 1; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -arg1_idx = 1; -arg3 = arg1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* DoubleTensor~1D DoubleTensor~1D", type_buf); -} -THDoubleTensor_zero(arg1); -lua_pushvalue(L, arg1_idx); -THDoubleTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} - -static int m_torch_DoubleTensor_addmv(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 1; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -THDoubleTensor *arg7 = NULL; -int arg7_idx = 0; -double arg8 = 0; -THDoubleTensor *arg9 = NULL; -double arg10 = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 1) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 1) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && 
(arg1->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 1) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 1) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -arg10 = (double)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg7->nDimension == 1) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg9->nDimension == 1) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.DoubleTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -arg10 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor~1D* [DoubleTensor~1D] [double] DoubleTensor~2D DoubleTensor~1D | *DoubleTensor~1D* double [DoubleTensor~1D] double DoubleTensor~2D DoubleTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_addmv(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THDoubleTensor_addmv(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_addmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 1; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -THDoubleTensor *arg7 = NULL; -int arg7_idx = 0; -double arg8 = 0; -THDoubleTensor *arg9 = NULL; -double arg10 = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && 
(arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -arg10 = (double)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg11->nDimension == 2) -&& (arg12 = luaT_toudata(L, 6, "torch.DoubleTensor")) && (arg12->nDimension == 2) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -arg10 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor~2D* [DoubleTensor~2D] [double] DoubleTensor~2D DoubleTensor~2D | *DoubleTensor~2D* double [DoubleTensor~2D] double DoubleTensor~2D DoubleTensor~2D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_addmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THDoubleTensor_addmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_addr(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 1; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -THDoubleTensor *arg7 = NULL; -int arg7_idx = 0; -double arg8 = 0; -THDoubleTensor *arg9 = NULL; -double arg10 = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 1) 
-) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 1) -&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 1) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -arg10 = (double)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg11->nDimension == 1) -&& (arg12 = luaT_toudata(L, 6, "torch.DoubleTensor")) && (arg12->nDimension == 1) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -arg10 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor~2D* [DoubleTensor~2D] [double] DoubleTensor~1D DoubleTensor~1D | *DoubleTensor~2D* double [DoubleTensor~2D] double DoubleTensor~1D DoubleTensor~1D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_addr(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THDoubleTensor_addr(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_addbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 1; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -THDoubleTensor *arg7 = NULL; -int arg7_idx = 0; -double arg8 = 0; -THDoubleTensor *arg9 = NULL; -double arg10 = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 
-&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 2) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 2) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -arg10 = (double)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg7->nDimension == 2) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg9->nDimension == 2) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.DoubleTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -arg10 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor~2D* [DoubleTensor~2D] [double] DoubleTensor~3D DoubleTensor~3D | *DoubleTensor~2D* double [DoubleTensor~2D] double DoubleTensor~3D DoubleTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_addbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THDoubleTensor_addbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_baddbmm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 1; -THDoubleTensor *arg3 = NULL; -double arg4 = 1; -THDoubleTensor *arg5 = NULL; -THDoubleTensor *arg6 = NULL; -THDoubleTensor *arg7 = NULL; -int arg7_idx = 0; -double arg8 = 0; -THDoubleTensor *arg9 = NULL; -double arg10 = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 3) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 2); -arg3 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg1->nDimension == 3) -&& (arg3 = 
luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg3->nDimension == 3) -&& lua_isnumber(L, 3) -&& (arg5 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg6 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg6->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 5 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& (arg11 = luaT_toudata(L, 4, "torch.DoubleTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -arg10 = (double)lua_tonumber(L, 3); -arg9 = arg7; -} -else if(narg == 6 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg7->nDimension == 3) -&& lua_isnumber(L, 2) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg9->nDimension == 3) -&& lua_isnumber(L, 4) -&& (arg11 = luaT_toudata(L, 5, "torch.DoubleTensor")) && (arg11->nDimension == 3) -&& (arg12 = luaT_toudata(L, 6, "torch.DoubleTensor")) && (arg12->nDimension == 3) -) -{ -argset = 2; -arg7_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -arg10 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor~3D* [DoubleTensor~3D] [double] DoubleTensor~3D DoubleTensor~3D | *DoubleTensor~3D* double [DoubleTensor~3D] double DoubleTensor~3D DoubleTensor~3D", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_baddbmm(arg1,arg2,arg3,arg4,arg5,arg6); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg7_idx); -THDoubleTensor_baddbmm(arg7,arg8,arg9,arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_numel(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -ptrdiff_t arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor", type_buf); -} -arg2 = THDoubleTensor_numel(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_DoubleTensor_cumsum(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cumsum(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_cumprod(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int 
arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cumprod(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_sum(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] DoubleTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_sumall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -THDoubleTensor_sum(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_prod(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] DoubleTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_prodall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -THDoubleTensor_prod(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int 
m_torch_DoubleTensor_min(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] [*LongTensor*] DoubleTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_minall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THDoubleTensor_min(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_DoubleTensor_max(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THLongTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -long arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg4 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (long)lua_tonumber(L, 3)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg4_idx = 2; -arg6 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
DoubleTensor | [*DoubleTensor*] [*LongTensor*] DoubleTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_maxall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.LongTensor"); -THDoubleTensor_max(arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg4, arg4, 1); -return 2; -} -return 0; -} - -static int m_torch_DoubleTensor_cmin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (double)lua_tonumber(L, 1); -arg4 = THDoubleTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (double)lua_tonumber(L, 2); -arg4 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [DoubleTensor] DoubleTensor | [*DoubleTensor*] [DoubleTensor] double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cmin(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.DoubleTensor"); -THDoubleTensor_cminValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_cmax(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& 
(arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (double)lua_tonumber(L, 1); -arg4 = THDoubleTensor_new(); -arg5 = arg4; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -arg5 = arg4; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (double)lua_tonumber(L, 2); -arg4 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [DoubleTensor] DoubleTensor | [*DoubleTensor*] [DoubleTensor] double", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cmax(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.DoubleTensor"); -THDoubleTensor_cmaxValue(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_trace(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor", type_buf); -} -arg2 = THDoubleTensor_trace(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int m_torch_DoubleTensor_cross(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -long arg4 = -1; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cross(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_DoubleTensor_diag(lua_State 
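/* Sketch (annotation, not part of the removed file): cmin/cmax above are the
   element-wise variants, taking either a second tensor or a plain double;
   trace() returns a scalar straight to Lua via lua_pushnumber(), and cross()
   keeps arg4 = -1 when no dimension is given, which TH resolves to the first
   dimension of size 3 (an assumption based on Torch7's documented behaviour).

     c = torch.cmax(a, b)       -- element-wise maximum
     s = torch.trace(m)         -- plain Lua number
     r = torch.cross(a, b, 2)   -- Lua dim 2 becomes TH dim 1 via the -1 shift
*/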
*L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -long arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [long]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_diag(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_eye(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -long arg2 = 0; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* long [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_eye(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_range(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 0; -double arg4 = 1; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* double double [double]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_range(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_DoubleTensor_randperm(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -long arg3 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected 
arguments: *DoubleTensor* [Generator] long", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_randperm(arg1,arg2,arg3); - -THDoubleTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_DoubleTensor_sort(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg2_idx = 1; -arg1 = THDoubleTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isboolean(L, 2) -) -{ -arg5 = lua_toboolean(L, 2); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg5 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg5 = lua_toboolean(L, 3); -arg1 = THDoubleTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = lua_toboolean(L, 4); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg5 = lua_toboolean(L, 3); -arg1 
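/* Sketch (annotation, not part of the removed file): randperm() above fills
   0..n-1 and then THDoubleTensor_add(arg1, arg1, 1) shifts the result into
   Lua's 1..n. sort() defaults the dimension to the last one
   (THDoubleTensor_nDimension(arg3)-1), takes an optional descending boolean
   (arg5), and returns both the sorted values and a LongTensor of original
   positions, again corrected to 1-based before returning.

     p = torch.randperm(10)          -- permutation of 1..10
     v, i = torch.sort(t, 2, true)   -- descending sort along dim 2
*/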
= THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [*LongTensor*] DoubleTensor [index] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THDoubleTensor_sort(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_DoubleTensor_topk(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -long arg4 = 1; -long arg5 = 0; -int arg6 = 0; -int arg7 = 0; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg2_idx = 1; -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = 
luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isboolean(L, 2) -) -{ -arg6 = lua_toboolean(L, 2); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = 
(long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg1 = THDoubleTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& 
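/* Sketch (annotation, not part of the removed file): the long else-if chain
   in topk() enumerates every combination of the optional arguments -- k
   (arg4, default 1), dimension (arg5, default last), a direction boolean
   (arg6, largest-first when true) and a sorted boolean (arg7) -- times the
   four placements of the optional result tensors. The code generator emits
   one branch per combination rather than parsing keyword arguments.

     v, i = t:topk(5, 1, true, true)  -- 5 largest along dim 1, sorted
*/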
lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isboolean(L, 2) -) -{ -arg7 = lua_toboolean(L, 2); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -) -{ -arg1_idx = 1; -arg7 = lua_toboolean(L, 3); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -) -{ -arg2_idx = 1; -arg7 = lua_toboolean(L, 3); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg7 = lua_toboolean(L, 4); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg7 = lua_toboolean(L, 3); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = 
luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg7 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg7 = lua_toboolean(L, 5); -arg1 = THDoubleTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg7 = lua_toboolean(L, 6); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isboolean(L, 2) -&& lua_isboolean(L, 3) -) -{ -arg6 = lua_toboolean(L, 2); -arg7 = lua_toboolean(L, 3); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg1_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg2_idx = 1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 
= lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -&& lua_isboolean(L, 4) -) -{ -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg7 = lua_toboolean(L, 4); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg1_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg2 = THLongTensor_new(); -} -else if(narg == 5 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg2_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THDoubleTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -} -else if(narg == 5 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -&& lua_isboolean(L, 5) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -arg7 = lua_toboolean(L, 5); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg2 = THLongTensor_new(); -} -else if(narg == 6 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -&& lua_isboolean(L, 6) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg6 = lua_toboolean(L, 5); -arg7 = lua_toboolean(L, 6); -arg1 = THDoubleTensor_new(); -} -else if(narg == 7 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& 
(arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -&& lua_isboolean(L, 6) -&& lua_isboolean(L, 7) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -arg6 = lua_toboolean(L, 6); -arg7 = lua_toboolean(L, 7); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [*LongTensor*] DoubleTensor [long] [index] [boolean] [boolean]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THDoubleTensor_topk(arg1,arg2,arg3,arg4,arg5,arg6,arg7); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_DoubleTensor_kthvalue(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -long arg4 = 0; -long arg5 = 0; -int arg6 = 1; -if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg2 = THLongTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (long)lua_tonumber(L, 2); -arg5 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3); -arg5 = (long)lua_tonumber(L, 4)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4); -arg5 = (long)lua_tonumber(L, 5)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: 
[*DoubleTensor*] [*LongTensor*] DoubleTensor long [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THDoubleTensor_kthvalue(arg1,arg2,arg3,arg4,arg5,arg6); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_DoubleTensor_mode(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg2_idx = 1; -arg1 = THDoubleTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [*LongTensor*] DoubleTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THDoubleTensor_mode(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_DoubleTensor_median(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THLongTensor *arg2 = NULL; -int arg2_idx = 0; -THDoubleTensor *arg3 = NULL; -long arg4 = 0; -int arg5 = 1; -if(narg == 1 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = THLongTensor_new(); -arg4 = 
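/* Sketch (annotation, not part of the removed file): kthvalue(), mode() and
   median() share one convention -- a values tensor plus a 1-based LongTensor
   of indices, with the dimension defaulting to the last one. The trailing
   constant each call passes as 1 (arg5/arg6) looks like a keep-dimension
   flag, though that is an assumption about the TH side rather than anything
   visible here.

     v, i = torch.median(t, 1)   -- per-column medians and their positions
*/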
THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg2_idx = 1; -arg1 = THDoubleTensor_new(); -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = THDoubleTensor_nDimension(arg3)-1; -} -else if(narg == 2 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (long)lua_tonumber(L, 2)-1; -arg1 = THDoubleTensor_new(); -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg2 = THLongTensor_new(); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg2_idx = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2_idx = 2; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] [*LongTensor*] DoubleTensor [index]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.LongTensor"); -THDoubleTensor_median(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg2, arg2, 1); -return 2; -} - -static int m_torch_DoubleTensor_tril(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_tril(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_triu(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -int arg3 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 
= luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (int)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (int)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [int]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_triu(arg1,arg2,arg3); -return 1; -} - -static int m_torch_DoubleTensor_cat(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -long arg4 = -2; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THDoubleTensor **arg6_data = NULL; -long arg6_size = 0; -int arg6_i = 0; -long arg7 = -2; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg4 = (long)lua_tonumber(L, 3)-1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (long)lua_tonumber(L, 4)-1; -} -else if(narg == 1 -&& torch_isnonemptytable(L, 1) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THDoubleTensor**)THAlloc(arg6_size * sizeof(THDoubleTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.DoubleTensor"))) - luaL_error(L, "expected DoubleTensor in tensor array"); - lua_pop(L, 1); -} - -arg5 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_isnonemptytable(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THDoubleTensor**)THAlloc(arg6_size * sizeof(THDoubleTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.DoubleTensor"))) - luaL_error(L, "expected DoubleTensor in tensor array"); - lua_pop(L, 1); -} - -} -else if(narg == 2 -&& torch_isnonemptytable(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 1, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THDoubleTensor**)THAlloc(arg6_size * sizeof(THDoubleTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.DoubleTensor"))) - luaL_error(L, "expected DoubleTensor in tensor array"); - lua_pop(L, 1); -} - 
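/* Sketch (annotation, not part of the removed file): besides the two-tensor
   form, cat() accepts a Lua table of tensors ({DoubleTensor+} in the error
   string). The do/while above walks the table with lua_rawgeti() until it
   hits nil, counts the entries, copies them into a THAlloc'd C array in
   reverse stack order, and hands that to THDoubleTensor_catArray(); the
   array is released with THFree() afterwards. The -2 default for the
   dimension appears to act as a "pick the dimension for me" sentinel
   resolved inside TH.

     c = torch.cat({a, b, c}, 1)   -- concatenate a table of tensors along dim 1
*/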
-arg7 = (long)lua_tonumber(L, 2)-1; -arg5 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_isnonemptytable(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -do -{ - arg6_size++; - lua_checkstack(L, 1); - lua_rawgeti(L, 2, arg6_size); -} -while (!lua_isnil(L, -1)); -arg6_size--; -lua_pop(L, 1); -arg6_data = (THDoubleTensor**)THAlloc(arg6_size * sizeof(THDoubleTensor*)); -for (arg6_i = arg6_size - 1; arg6_i >= 0; arg6_i--) -{ - if (!(arg6_data[arg6_i] = luaT_toudata(L, -1, "torch.DoubleTensor"))) - luaL_error(L, "expected DoubleTensor in tensor array"); - lua_pop(L, 1); -} - -arg7 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor DoubleTensor [index] | [*DoubleTensor*] {DoubleTensor+} [index]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_cat(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -if(arg5_idx) -lua_pushvalue(L, arg5_idx); -else -luaT_pushudata(L, arg5, "torch.DoubleTensor"); -THDoubleTensor_catArray(arg5,arg6_data,arg6_size,arg7); -THFree(arg6_data); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_random(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -long arg2 = 0; -long arg3 = 0; -long arg4 = 0; -THGenerator *arg5 = NULL; -long arg6 = 0; -long arg7 = 0; -THGenerator *arg8 = NULL; -long arg9 = 0; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -long arg12 = 0; -long arg13 = 0; -THDoubleTensor *arg14 = NULL; -int arg14_idx = 0; -THGenerator *arg15 = NULL; -long arg16 = 0; -THDoubleTensor *arg17 = NULL; -int arg17_idx = 0; -THGenerator *arg18 = NULL; -if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 1); -arg3 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (long)lua_tonumber(L, 2); -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg6 = (long)lua_tonumber(L, 2); -} -else if(narg == 0 -) -{ -argset = 3; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg8 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 3; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 2); -arg13 = (long)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 4; -arg10_idx = 1; -arg12 = (long)lua_tonumber(L, 3); -arg13 = 
(long)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg15 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg15 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 5; -arg14_idx = 1; -arg16 = (long)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg17 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 6; -arg17_idx = 1; -lua_getglobal(L,"torch"); -arg18 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg17 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg18 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 6; -arg17_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long long | [Generator] long | [Generator] | *DoubleTensor* [Generator] long long | *DoubleTensor* [Generator] long | *DoubleTensor* [Generator]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_random2__(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -arg7 = THRandom_random1__(arg5,arg6); -lua_pushnumber(L, (lua_Number)arg7); -return 1; -} -else if(argset == 3) -{ -arg9 = THRandom_random(arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_random2__(arg10,arg11,arg12,arg13); -return 1; -} -else if(argset == 5) -{ -lua_pushvalue(L, arg14_idx); -THDoubleTensor_random1__(arg14,arg15,arg16); -return 1; -} -else if(argset == 6) -{ -lua_pushvalue(L, arg17_idx); -THDoubleTensor_random(arg17,arg18); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_geometric(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *DoubleTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_geometric(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_geometric(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_bernoulli(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; 
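/* Sketch (annotation, not part of the removed file): the RNG entry points
   (random and geometric above, bernoulli below) all accept an optional
   explicit torch.Generator; when it is omitted, the wrapper pulls the shared
   default generator out of the global torch table's "_gen" field via
   lua_getglobal() + luaT_getfieldcheckudata().

     x = torch.random(1, 6)   -- uses the default generator
     t:random(gen, 1, 6)      -- explicit generator (gen is illustrative)
*/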
-THGenerator *arg1 = NULL; -double arg2 = 0.5; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0.5; -THDoubleTensor *arg7 = NULL; -int arg7_idx = 0; -THGenerator *arg8 = NULL; -THFloatTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THGenerator *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg4_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -lua_getglobal(L,"torch"); -arg8 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg8 = luaT_toudata(L, 2, torch_Generator)) -&& (arg9 = luaT_toudata(L, 3, "torch.FloatTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 2 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -lua_getglobal(L,"torch"); -arg11 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, torch_Generator)) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] | *DoubleTensor* [Generator] [double] | *DoubleTensor* [Generator] FloatTensor | *DoubleTensor* [Generator] DoubleTensor", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_bernoulli(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_bernoulli(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -lua_pushvalue(L, arg7_idx); -THDoubleTensor_bernoulli_FloatTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_bernoulli_DoubleTensor(arg10,arg11,arg12); 
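/* Sketch (annotation, not part of the removed file): bernoulli() defaults the
   success probability to 0.5 (arg2/arg6 above) and, beyond the scalar form,
   also accepts a per-element probability tensor, dispatching to
   THDoubleTensor_bernoulli_FloatTensor or _bernoulli_DoubleTensor depending
   on the argument's type.

     t:bernoulli()       -- fair coin per element
     t:bernoulli(0.3)    -- fixed probability
     t:bernoulli(probs)  -- per-element probabilities (probs is illustrative)
*/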
-return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_squeeze(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -long arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor | [*DoubleTensor*] DoubleTensor index", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_squeeze(arg1,arg2); -if(arg1->nDimension == 1 && arg1->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THDoubleTensor_data(arg1))); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -{int hasdims = arg4->nDimension > 1; -THDoubleTensor_squeeze1d(arg3,arg4,arg5); -if(!hasdims && arg3->nDimension == 1 && arg3->size[0] == 1) -lua_pushnumber(L, (lua_Number)(*THDoubleTensor_data(arg3)));} -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_sign(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_sign(arg1,arg2); -return 1; -} - -static int m_torch_DoubleTensor_conv2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -double arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'C'; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -double arg11 = 0; -double arg12 = 1; -THDoubleTensor *arg13 = NULL; -THDoubleTensor *arg14 = NULL; -double arg15 = 1; -double arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'C'; -THDoubleTensor *arg19 = NULL; -int arg19_idx = 0; -double arg20 = 0; -double arg21 = 1; -THDoubleTensor *arg22 = NULL; -THDoubleTensor *arg23 = NULL; -double arg24 = 1; -double arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && 
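/* Sketch (annotation, not part of the removed file): squeeze() above
   collapses a result that squeezes down to a single element into a plain Lua
   number instead of a tensor. conv2() and xcorr2() extend the overload tests
   with nDimension checks (2D, 3D, or 3D-by-4D operand pairs) and take a mode
   string, 'V' for a valid-only convolution or 'F' for the full one, stored
   in arg8/arg17/arg26 with 'V' as the default.

     r = torch.conv2(x, k, 'F')   -- full 2-D convolution
*/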
(arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THDoubleTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THDoubleTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THDoubleTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THDoubleTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& 
(arg22 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor~2D DoubleTensor~2D [(V|F)] | [*DoubleTensor*] DoubleTensor~3D DoubleTensor~3D [(V|F)] | [*DoubleTensor*] DoubleTensor~3D DoubleTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.DoubleTensor"); -THDoubleTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.DoubleTensor"); -THDoubleTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_xcorr2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -double arg7 = 1; -const char *arg8 = NULL; -char arg8_default = 'V'; -const char *arg9 = NULL; -char arg9_default = 'X'; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -double arg11 = 0; -double arg12 = 1; -THDoubleTensor *arg13 = NULL; -THDoubleTensor *arg14 = NULL; -double arg15 = 1; -double arg16 = 1; -const char *arg17 = NULL; -char arg17_default = 'V'; -const char *arg18 = NULL; -char arg18_default = 'X'; -THDoubleTensor *arg19 = NULL; -int arg19_idx = 0; -double arg20 = 0; -double arg21 = 1; -THDoubleTensor *arg22 = NULL; -THDoubleTensor *arg23 = NULL; -double arg24 = 1; -double arg25 = 1; -const char *arg26 = NULL; -char arg26_default = 'V'; -const char *arg27 = NULL; -char arg27_default = 'X'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -) -{ -argset = 1; -arg1_idx = 1; -arg8 = &arg8_default; -arg9 = &arg9_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 3)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg9 = &arg9_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 2) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 2) -&& (arg8 = lua_tostring(L, 4)) && (*arg8 == 'V' || *arg8 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -} -else if(narg == 2 -&& (arg13 
= luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10 = THDoubleTensor_new(); -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg14->nDimension == 3) -) -{ -argset = 2; -arg10_idx = 1; -arg17 = &arg17_default; -arg18 = &arg18_default; -} -else if(narg == 3 -&& (arg13 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 3)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10 = THDoubleTensor_new(); -arg18 = &arg18_default; -} -else if(narg == 4 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg13 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg13->nDimension == 3) -&& (arg14 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg14->nDimension == 3) -&& (arg17 = lua_tostring(L, 4)) && (*arg17 == 'V' || *arg17 == 'F') -) -{ -argset = 2; -arg10_idx = 1; -arg18 = &arg18_default; -} -else if(narg == 2 -&& (arg22 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19 = THDoubleTensor_new(); -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg19 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg23->nDimension == 4) -) -{ -argset = 3; -arg19_idx = 1; -arg26 = &arg26_default; -arg27 = &arg27_default; -} -else if(narg == 3 -&& (arg22 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 3)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19 = THDoubleTensor_new(); -arg27 = &arg27_default; -} -else if(narg == 4 -&& (arg19 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg22 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg22->nDimension == 3) -&& (arg23 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg23->nDimension == 4) -&& (arg26 = lua_tostring(L, 4)) && (*arg26 == 'V' || *arg26 == 'F') -) -{ -argset = 3; -arg19_idx = 1; -arg27 = &arg27_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor~2D DoubleTensor~2D [(V|F)] | [*DoubleTensor*] DoubleTensor~3D DoubleTensor~3D [(V|F)] | [*DoubleTensor*] DoubleTensor~3D DoubleTensor~4D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_conv2Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9); -return 1; -} -else if(argset == 2) -{ -if(arg10_idx) -lua_pushvalue(L, arg10_idx); -else -luaT_pushudata(L, arg10, "torch.DoubleTensor"); -THDoubleTensor_conv2Dcmul(arg10,arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18); -return 1; -} -else if(argset == 3) -{ -if(arg19_idx) -lua_pushvalue(L, arg19_idx); -else -luaT_pushudata(L, arg19, "torch.DoubleTensor"); 
-THDoubleTensor_conv2Dmv(arg19,arg20,arg21,arg22,arg23,arg24,arg25,arg26,arg27); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_conv3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -double arg7 = 1; -double arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'C'; -THDoubleTensor *arg11 = NULL; -int arg11_idx = 0; -double arg12 = 0; -double arg13 = 1; -THDoubleTensor *arg14 = NULL; -THDoubleTensor *arg15 = NULL; -double arg16 = 1; -double arg17 = 1; -double arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'C'; -THDoubleTensor *arg21 = NULL; -int arg21_idx = 0; -double arg22 = 0; -double arg23 = 1; -THDoubleTensor *arg24 = NULL; -THDoubleTensor *arg25 = NULL; -double arg26 = 1; -double arg27 = 1; -double arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'C'; -if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THDoubleTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THDoubleTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = 
luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THDoubleTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THDoubleTensor_new(); -arg30 = &arg30_default; -} -else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor~3D DoubleTensor~3D [(V|F)] | [*DoubleTensor*] DoubleTensor~4D DoubleTensor~4D [(V|F)] | [*DoubleTensor*] DoubleTensor~4D DoubleTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.DoubleTensor"); -THDoubleTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.DoubleTensor"); -THDoubleTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_xcorr3(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 1; -THDoubleTensor *arg4 = NULL; -THDoubleTensor *arg5 = NULL; -double arg6 = 1; -double arg7 = 1; -double arg8 = 1; -const char *arg9 = NULL; -char arg9_default = 'V'; -const char *arg10 = NULL; -char arg10_default = 'X'; -THDoubleTensor *arg11 = NULL; -int arg11_idx = 0; -double arg12 = 0; -double arg13 = 1; -THDoubleTensor *arg14 = NULL; -THDoubleTensor *arg15 = NULL; -double arg16 = 1; -double arg17 = 1; -double arg18 = 1; -const char *arg19 = NULL; -char arg19_default = 'V'; -const char *arg20 = NULL; -char arg20_default = 'X'; -THDoubleTensor *arg21 = NULL; -int arg21_idx = 0; -double arg22 = 0; -double arg23 = 1; -THDoubleTensor *arg24 = NULL; -THDoubleTensor *arg25 = NULL; -double arg26 = 1; -double arg27 = 1; -double arg28 = 1; -const char *arg29 = NULL; -char arg29_default = 'V'; -const char *arg30 = NULL; -char arg30_default = 'X'; 
-if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3) -) -{ -argset = 1; -arg1_idx = 1; -arg9 = &arg9_default; -arg10 = &arg10_default; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 3)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1 = THDoubleTensor_new(); -arg10 = &arg10_default; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg4->nDimension == 3) -&& (arg5 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg5->nDimension == 3) -&& (arg9 = lua_tostring(L, 4)) && (*arg9 == 'V' || *arg9 == 'F') -) -{ -argset = 1; -arg1_idx = 1; -arg10 = &arg10_default; -} -else if(narg == 2 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11 = THDoubleTensor_new(); -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg11 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg15->nDimension == 4) -) -{ -argset = 2; -arg11_idx = 1; -arg19 = &arg19_default; -arg20 = &arg20_default; -} -else if(narg == 3 -&& (arg14 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 3)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11 = THDoubleTensor_new(); -arg20 = &arg20_default; -} -else if(narg == 4 -&& (arg11 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg14 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg14->nDimension == 4) -&& (arg15 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg15->nDimension == 4) -&& (arg19 = lua_tostring(L, 4)) && (*arg19 == 'V' || *arg19 == 'F') -) -{ -argset = 2; -arg11_idx = 1; -arg20 = &arg20_default; -} -else if(narg == 2 -&& (arg24 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21 = THDoubleTensor_new(); -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg21 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg25->nDimension == 5) -) -{ -argset = 3; -arg21_idx = 1; -arg29 = &arg29_default; -arg30 = &arg30_default; -} -else if(narg == 3 -&& (arg24 = luaT_toudata(L, 1, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 3)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21 = THDoubleTensor_new(); -arg30 = &arg30_default; -} 
-else if(narg == 4 -&& (arg21 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg24 = luaT_toudata(L, 2, "torch.DoubleTensor")) && (arg24->nDimension == 4) -&& (arg25 = luaT_toudata(L, 3, "torch.DoubleTensor")) && (arg25->nDimension == 5) -&& (arg29 = lua_tostring(L, 4)) && (*arg29 == 'V' || *arg29 == 'F') -) -{ -argset = 3; -arg21_idx = 1; -arg30 = &arg30_default; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor~3D DoubleTensor~3D [(V|F)] | [*DoubleTensor*] DoubleTensor~4D DoubleTensor~4D [(V|F)] | [*DoubleTensor*] DoubleTensor~4D DoubleTensor~5D [(V|F)]", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_conv3Dmul(arg1,arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9,arg10); -return 1; -} -else if(argset == 2) -{ -if(arg11_idx) -lua_pushvalue(L, arg11_idx); -else -luaT_pushudata(L, arg11, "torch.DoubleTensor"); -THDoubleTensor_conv3Dcmul(arg11,arg12,arg13,arg14,arg15,arg16,arg17,arg18,arg19,arg20); -return 1; -} -else if(argset == 3) -{ -if(arg21_idx) -lua_pushvalue(L, arg21_idx); -else -luaT_pushudata(L, arg21, "torch.DoubleTensor"); -THDoubleTensor_conv3Dmv(arg21,arg22,arg23,arg24,arg25,arg26,arg27,arg28,arg29,arg30); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_lt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); 
-THDoubleTensor_ltValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_ltValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_ltTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_ltTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_gt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THDoubleTensor_gtValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_gtValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_gtTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_gtTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_le(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; 
-THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THDoubleTensor_leValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_leValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_leTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_leTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_ge(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, 
"torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THDoubleTensor_geValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_geValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_geTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_geTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_eq(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THDoubleTensor_eqValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); 
-THDoubleTensor_eqValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_eqTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_eqTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_ne(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THByteTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -THByteTensor *arg7 = NULL; -int arg7_idx = 0; -THDoubleTensor *arg8 = NULL; -THDoubleTensor *arg9 = NULL; -THDoubleTensor *arg10 = NULL; -int arg10_idx = 0; -THDoubleTensor *arg11 = NULL; -THDoubleTensor *arg12 = NULL; -if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -arg1 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg8 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7 = THByteTensor_new(); -} -else if(narg == 3 -&& (arg7 = luaT_toudata(L, 1, "torch.ByteTensor")) -&& (arg8 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg9 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 3; -arg7_idx = 1; -} -else if(narg == 3 -&& (arg10 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg11 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg12 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 4; -arg10_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*ByteTensor*] DoubleTensor double | *DoubleTensor* DoubleTensor double | [*ByteTensor*] DoubleTensor DoubleTensor | *DoubleTensor* DoubleTensor DoubleTensor", type_buf); -} -if(argset == 1) -{ -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.ByteTensor"); -THDoubleTensor_neValue(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_neValueT(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -if(arg7_idx) -lua_pushvalue(L, arg7_idx); -else -luaT_pushudata(L, arg7, "torch.ByteTensor"); -THDoubleTensor_neTensor(arg7,arg8,arg9); -return 1; -} -else if(argset == 4) -{ -lua_pushvalue(L, arg10_idx); -THDoubleTensor_neTensorT(arg10,arg11,arg12); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_nonzero(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THLongTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected 
arguments: [*LongTensor*] DoubleTensor", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.LongTensor"); -THDoubleTensor_nonzero(arg1,arg2); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_DoubleTensor_mean(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] DoubleTensor index", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_meanall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -THDoubleTensor_mean(arg3,arg4,arg5,arg6); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_var(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 0; -int arg7 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg3 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] DoubleTensor index [boolean]", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_varall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -THDoubleTensor_var(arg3,arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_std(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 0; -THDoubleTensor *arg3 = NULL; -int arg3_idx = 0; -THDoubleTensor *arg4 = NULL; -long arg5 = 0; -int arg6 = 0; -int arg7 = 1; -if(narg 
== 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg3 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isboolean(L, 3) -) -{ -argset = 2; -arg5 = (long)lua_tonumber(L, 2)-1; -arg6 = lua_toboolean(L, 3); -arg3 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg3 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg4 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -argset = 2; -arg3_idx = 1; -arg5 = (long)lua_tonumber(L, 3)-1; -arg6 = lua_toboolean(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor | [*DoubleTensor*] DoubleTensor index [boolean]", type_buf); -} -if(argset == 1) -{ -arg2 = THDoubleTensor_stdall(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} -else if(argset == 2) -{ -if(arg3_idx) -lua_pushvalue(L, arg3_idx); -else -luaT_pushudata(L, arg3, "torch.DoubleTensor"); -THDoubleTensor_std(arg3,arg4,arg5,arg6,arg7); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_histc(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -long arg3 = 100; -double arg4 = 0; -double arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 
3) -) -{ -arg1_idx = 1; -arg5 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -arg5 = (double)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [long] [double] [double]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_histc(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_DoubleTensor_bhistc(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -long arg3 = 100; -double arg4 = 0; -double arg5 = 0; -if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1 = THDoubleTensor_new(); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, 
"torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -} -else if(narg == 2 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -arg5 = (double)lua_tonumber(L, 2); -arg1 = THDoubleTensor_new(); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg5 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 3 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg4 = (double)lua_tonumber(L, 2); -arg5 = (double)lua_tonumber(L, 3); -arg1 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -} -else if(narg == 4 -&& (arg2 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg3 = (long)lua_tonumber(L, 2); -arg4 = (double)lua_tonumber(L, 3); -arg5 = (double)lua_tonumber(L, 4); -arg1 = THDoubleTensor_new(); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (long)lua_tonumber(L, 3); -arg4 = (double)lua_tonumber(L, 4); -arg5 = (double)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [*DoubleTensor*] DoubleTensor [long] [double] [double]", type_buf); -} -if(arg1_idx) -lua_pushvalue(L, arg1_idx); -else -luaT_pushudata(L, arg1, "torch.DoubleTensor"); -THDoubleTensor_bhistc(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_DoubleTensor_norm(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -double arg2 = 2; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THDoubleTensor *arg5 = NULL; -double arg6 = 0; -long arg7 = 0; -int arg8 = 1; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg6 = (double)lua_tonumber(L, 2); -arg7 = (long)lua_tonumber(L, 3)-1; -arg4 = THDoubleTensor_new(); -} -else if(narg == 4 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg4_idx = 1; -arg6 
= (double)lua_tonumber(L, 3); -arg7 = (long)lua_tonumber(L, 4)-1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor [double] | [*DoubleTensor*] DoubleTensor double index", type_buf); -} -if(argset == 1) -{ -arg3 = THDoubleTensor_normall(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -if(arg4_idx) -lua_pushvalue(L, arg4_idx); -else -luaT_pushudata(L, arg4, "torch.DoubleTensor"); -THDoubleTensor_norm(arg4,arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_renorm(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -long arg4 = 0; -double arg5 = 0; -if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg4 = (long)lua_tonumber(L, 3)-1; -arg5 = (double)lua_tonumber(L, 4); -arg2 = arg1; -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -&& lua_isnumber(L, 5) -) -{ -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4)-1; -arg5 = (double)lua_tonumber(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double index double", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_renorm(arg1,arg2,arg3,arg4,arg5); -return 1; -} - -static int m_torch_DoubleTensor_dist(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -THDoubleTensor *arg2 = NULL; -double arg3 = 2; -double arg4 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -arg3 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: DoubleTensor DoubleTensor [double]", type_buf); -} -arg4 = THDoubleTensor_dist(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} - -static int m_torch_DoubleTensor_linspace(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -double arg2 = 0; -double arg3 = 0; -long arg4 = 100; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* double double [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_linspace(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_DoubleTensor_logspace(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; 
-double arg2 = 0; -double arg3 = 0; -long arg4 = 100; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -arg1_idx = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -arg4 = (long)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* double double [long]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_logspace(arg1,arg2,arg3,arg4); -return 1; -} - -static int m_torch_DoubleTensor_log(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_log(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = log(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_log1p(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_log1p(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = log1p(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_exp(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} 
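/* Each pointwise math wrapper doubles as a scalar function: called with
   a tensor it runs the THDoubleTensor_* kernel in place or into a
   destination tensor (argset 1); called with a plain Lua number it falls
   through to the C library routine -- exp(arg3) here -- and returns a
   number (argset 2). The same template repeats for log and log1p above
   and for cos, acos, cosh, sin, asin, sinh and tan below. */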
-if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_exp(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = exp(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_cos(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cos(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = cos(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_acos(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_acos(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = acos(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_cosh(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cosh(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = cosh(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_sin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, 
"torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_sin(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = sin(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_asin(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_asin(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = asin(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_sinh(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_sinh(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = sinh(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_tan(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_tan(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = tan(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - 
-static int m_torch_DoubleTensor_atan(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_atan(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = atan(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_tanh(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_tanh(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = tanh(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_sqrt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_sqrt(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = sqrt(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_round(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} 
-else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_round(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = round(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_ceil(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_ceil(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = ceil(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_floor(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_floor(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = floor(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_trunc(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_trunc(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = trunc(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_abs(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 
= 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_abs(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = fabs(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_frac(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_frac(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = TH_frac(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_rsqrt(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_rsqrt(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = TH_rsqrt(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_sigmoid(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -double arg4 = 0; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 2; -arg3 = (double)lua_tonumber(L, 1); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] | double", type_buf); -} -if(argset == 1) -{ 
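/* tensor path: push the destination back (so the call returns the tensor)
   and apply the elementwise sigmoid; the scalar fallback below uses
   TH_sigmoid, i.e. 1/(1 + exp(-x)). */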
-lua_pushvalue(L, arg1_idx); -THDoubleTensor_sigmoid(arg1,arg2); -return 1; -} -else if(argset == 2) -{ -arg4 = TH_sigmoid(arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_neg(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_neg(arg1,arg2); -return 1; -} - -static int m_torch_DoubleTensor_cinv(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -arg1_idx = 1; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_cinv(arg1,arg2); -return 1; -} - -static int m_torch_DoubleTensor_lerp(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -double arg4 = 0; -double arg5 = 0; -double arg6 = 0; -double arg7 = 0; -double arg8 = 0; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 3); -arg2 = arg1; -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -argset = 1; -arg1_idx = 1; -arg4 = (double)lua_tonumber(L, 4); -} -else if(narg == 3 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5 = (double)lua_tonumber(L, 1); -arg6 = (double)lua_tonumber(L, 2); -arg7 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor double | double double double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_lerp(arg1,arg2,arg3,arg4); -return 1; -} -else if(argset == 2) -{ -arg8 = TH_lerp(arg5,arg6,arg7); -lua_pushnumber(L, (lua_Number)arg8); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_atan2(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -double arg4 = 0; -double arg5 = 0; -double arg6 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, 
"torch.DoubleTensor")) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 1; -arg1_idx = 1; -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4 = (double)lua_tonumber(L, 1); -arg5 = (double)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] DoubleTensor | double double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_atan2(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -arg6 = atan2(arg4,arg5); -lua_pushnumber(L, (lua_Number)arg6); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_pow(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THDoubleTensor *arg2 = NULL; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -double arg5 = 0; -THDoubleTensor *arg6 = NULL; -double arg7 = 0; -double arg8 = 0; -double arg9 = 0; -if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 2); -arg2 = arg1; -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg1_idx = 1; -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = (double)lua_tonumber(L, 2); -arg6 = arg4; -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& (arg6 = luaT_toudata(L, 3, "torch.DoubleTensor")) -) -{ -argset = 2; -arg4_idx = 1; -arg5 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 3; -arg7 = (double)lua_tonumber(L, 1); -arg8 = (double)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [DoubleTensor] double | *DoubleTensor* double [DoubleTensor] | double double", type_buf); -} -if(argset == 1) -{ -lua_pushvalue(L, arg1_idx); -THDoubleTensor_pow(arg1,arg2,arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_tpow(arg4,arg5,arg6); -return 1; -} -else if(argset == 3) -{ -arg9 = pow(arg7,arg8); -lua_pushnumber(L, (lua_Number)arg9); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_rand(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [Generator] (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_rand(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int 
m_torch_DoubleTensor_randn(lua_State *L) -{ -int narg = lua_gettop(L); -THDoubleTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -THLongStorage *arg3 = NULL; -if(narg >= 2 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& torch_islongargs(L, 2) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 2); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg >= 3 -&& (arg1 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& torch_islongargs(L, 3) -) -{ -arg1_idx = 1; -arg3 = torch_checklongargs(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *DoubleTensor* [Generator] (LongStorage | dim1 [dim2...])", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_randn(arg1,arg2,arg3); -THLongStorage_free(arg3); -return 1; -} - -static int m_torch_DoubleTensor_multinomial(lua_State *L) -{ -int narg = lua_gettop(L); -THLongTensor *arg1 = NULL; -int arg1_idx = 0; -THGenerator *arg2 = NULL; -THDoubleTensor *arg3 = NULL; -int arg4 = 0; -int arg5 = 0; -if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 4); -} -else if(narg == 4 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg3 = luaT_toudata(L, 2, "torch.DoubleTensor")) -&& lua_isnumber(L, 3) -&& lua_isboolean(L, 4) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 3); -arg5 = lua_toboolean(L, 4); -lua_getglobal(L,"torch"); -arg2 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 5 -&& (arg1 = luaT_toudata(L, 1, "torch.LongTensor")) -&& (arg2 = luaT_toudata(L, 2, torch_Generator)) -&& (arg3 = luaT_toudata(L, 3, "torch.DoubleTensor")) -&& lua_isnumber(L, 4) -&& lua_isboolean(L, 5) -) -{ -THLongTensor_add(arg1, arg1, -1); -arg1_idx = 1; -arg4 = (int)lua_tonumber(L, 4); -arg5 = lua_toboolean(L, 5); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: *LongTensor* [Generator] DoubleTensor int [boolean]", type_buf); -} -lua_pushvalue(L, arg1_idx); -THDoubleTensor_multinomial(arg1,arg2,arg3,arg4,arg5); -THLongTensor_add(arg1, arg1, 1); -return 1; -} - -static int m_torch_DoubleTensor_uniform(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 1; -double arg4 = 0; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -double arg7 = 0; -double arg8 = 1; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = 
luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -arg8 = (double)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -arg8 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *DoubleTensor* [Generator] [double] [double]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_uniform(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THDoubleTensor_uniform(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_normal(lua_State *L) -{ 
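/*
 * Same overload grid as uniform above: an optional Generator first, then
 * zero, one or two doubles (mean and stddev here, defaulting to 0 and 1).
 * With plain numbers the wrapper returns a single sample via THRandom_normal;
 * with a leading DoubleTensor it fills that tensor via THDoubleTensor_normal.
 * When no Generator is passed, the global one is fetched from torch._gen.
 * Note the duplicated single-number branches below: the first match wins, so
 * a lone number is always treated as the mean. A hedged Lua sketch:
 *
 *   torch.normal()           -- one sample from N(0, 1)
 *   torch.normal(10, 2)      -- one sample from N(10, 2)
 *   local x = torch.DoubleTensor(5)
 *   x:normal(10, 2)          -- fill x with N(10, 2) samples
 */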
-int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 1; -double arg4 = 0; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -double arg7 = 0; -double arg8 = 1; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -arg8 = (double)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; 
-arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -arg8 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *DoubleTensor* [Generator] [double] [double]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_normal(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THDoubleTensor_normal(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_cauchy(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 1; -double arg4 = 0; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -double arg7 = 0; -double arg8 = 1; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& 
lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -arg8 = (double)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -arg8 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *DoubleTensor* [Generator] [double] [double]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_cauchy(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THDoubleTensor_cauchy(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_logNormal(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 1; -double arg3 = 2; -double arg4 = 0; -THDoubleTensor *arg5 = NULL; -int arg5_idx = 0; -THGenerator *arg6 = NULL; -double arg7 = 1; -double arg8 = 2; -if(narg == 0 -) -{ -argset = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -argset = 1; -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg3 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& lua_isnumber(L, 1) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -arg3 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -arg3 = (double)lua_tonumber(L, 3); -} -else if(narg == 1 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -) -{ -argset = 2; -arg5_idx = 1; -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -) -{ -argset = 2; -arg5_idx = 1; -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 
3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -} -else if(narg == 2 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg8 = (double)lua_tonumber(L, 3); -} -else if(narg == 3 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 2); -arg8 = (double)lua_tonumber(L, 3); -lua_getglobal(L,"torch"); -arg6 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 4 -&& (arg5 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg6 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -&& lua_isnumber(L, 4) -) -{ -argset = 2; -arg5_idx = 1; -arg7 = (double)lua_tonumber(L, 3); -arg8 = (double)lua_tonumber(L, 4); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [double] [double] | *DoubleTensor* [Generator] [double] [double]", type_buf); -} -if(argset == 1) -{ -arg4 = THRandom_logNormal(arg1,arg2,arg3); -lua_pushnumber(L, (lua_Number)arg4); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg5_idx); -THDoubleTensor_logNormal(arg5,arg6,arg7,arg8); -return 1; -} -return 0; -} - -static int m_torch_DoubleTensor_exponential(lua_State *L) -{ -int narg = lua_gettop(L); -int argset = 0; -THGenerator *arg1 = NULL; -double arg2 = 0; -double arg3 = 0; -THDoubleTensor *arg4 = NULL; -int arg4_idx = 0; -THGenerator *arg5 = NULL; -double arg6 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -argset = 1; -arg2 = (double)lua_tonumber(L, 2); -} -else if(narg == 2 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& lua_isnumber(L, 2) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 2); -lua_getglobal(L,"torch"); -arg5 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 3 -&& (arg4 = luaT_toudata(L, 1, "torch.DoubleTensor")) -&& (arg5 = luaT_toudata(L, 2, torch_Generator)) -&& lua_isnumber(L, 3) -) -{ -argset = 2; -arg4_idx = 1; -arg6 = (double)lua_tonumber(L, 3); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] double | *DoubleTensor* [Generator] double", type_buf); -} -if(argset == 1) -{ -arg3 = THRandom_exponential(arg1,arg2); -lua_pushnumber(L, (lua_Number)arg3); -return 1; -} -else if(argset == 2) -{ -lua_pushvalue(L, arg4_idx); -THDoubleTensor_exponential(arg4,arg5,arg6); -return 1; -} -return 0; -} - -static const struct luaL_Reg m_torch_DoubleTensorMath__ [] = { -{"zero", m_torch_DoubleTensor_zero}, -{"fill", m_torch_DoubleTensor_fill}, -{"zeros", m_torch_DoubleTensor_zeros}, -{"ones", m_torch_DoubleTensor_ones}, -{"reshape", 
m_torch_DoubleTensor_reshape}, -{"gather", m_torch_DoubleTensor_gather}, -{"scatter", m_torch_DoubleTensor_scatter}, -{"dot", m_torch_DoubleTensor_dot}, -{"equal", m_torch_DoubleTensor_equal}, -{"add", m_torch_DoubleTensor_add}, -{"csub", m_torch_DoubleTensor_csub}, -{"mul", m_torch_DoubleTensor_mul}, -{"div", m_torch_DoubleTensor_div}, -{"lshift", m_torch_DoubleTensor_lshift}, -{"rshift", m_torch_DoubleTensor_rshift}, -{"fmod", m_torch_DoubleTensor_fmod}, -{"remainder", m_torch_DoubleTensor_remainder}, -{"bitand", m_torch_DoubleTensor_bitand}, -{"bitor", m_torch_DoubleTensor_bitor}, -{"bitxor", m_torch_DoubleTensor_bitxor}, -{"mod", m_torch_DoubleTensor_mod}, -{"clamp", m_torch_DoubleTensor_clamp}, -{"match", m_torch_DoubleTensor_match}, -{"cmul", m_torch_DoubleTensor_cmul}, -{"cpow", m_torch_DoubleTensor_cpow}, -{"cdiv", m_torch_DoubleTensor_cdiv}, -{"clshift", m_torch_DoubleTensor_clshift}, -{"crshift", m_torch_DoubleTensor_crshift}, -{"cfmod", m_torch_DoubleTensor_cfmod}, -{"cremainder", m_torch_DoubleTensor_cremainder}, -{"cbitand", m_torch_DoubleTensor_cbitand}, -{"cbitor", m_torch_DoubleTensor_cbitor}, -{"cbitxor", m_torch_DoubleTensor_cbitxor}, -{"cmod", m_torch_DoubleTensor_cmod}, -{"addcmul", m_torch_DoubleTensor_addcmul}, -{"addcdiv", m_torch_DoubleTensor_addcdiv}, -{"mv", m_torch_DoubleTensor_mv}, -{"mm", m_torch_DoubleTensor_mm}, -{"bmm", m_torch_DoubleTensor_bmm}, -{"ger", m_torch_DoubleTensor_ger}, -{"addmv", m_torch_DoubleTensor_addmv}, -{"addmm", m_torch_DoubleTensor_addmm}, -{"addr", m_torch_DoubleTensor_addr}, -{"addbmm", m_torch_DoubleTensor_addbmm}, -{"baddbmm", m_torch_DoubleTensor_baddbmm}, -{"numel", m_torch_DoubleTensor_numel}, -{"cumsum", m_torch_DoubleTensor_cumsum}, -{"cumprod", m_torch_DoubleTensor_cumprod}, -{"sum", m_torch_DoubleTensor_sum}, -{"prod", m_torch_DoubleTensor_prod}, -{"min", m_torch_DoubleTensor_min}, -{"max", m_torch_DoubleTensor_max}, -{"cmin", m_torch_DoubleTensor_cmin}, -{"cmax", m_torch_DoubleTensor_cmax}, -{"trace", m_torch_DoubleTensor_trace}, -{"cross", m_torch_DoubleTensor_cross}, -{"diag", m_torch_DoubleTensor_diag}, -{"eye", m_torch_DoubleTensor_eye}, -{"range", m_torch_DoubleTensor_range}, -{"randperm", m_torch_DoubleTensor_randperm}, -{"sort", m_torch_DoubleTensor_sort}, -{"topk", m_torch_DoubleTensor_topk}, -{"kthvalue", m_torch_DoubleTensor_kthvalue}, -{"mode", m_torch_DoubleTensor_mode}, -{"median", m_torch_DoubleTensor_median}, -{"tril", m_torch_DoubleTensor_tril}, -{"triu", m_torch_DoubleTensor_triu}, -{"cat", m_torch_DoubleTensor_cat}, -{"random", m_torch_DoubleTensor_random}, -{"geometric", m_torch_DoubleTensor_geometric}, -{"bernoulli", m_torch_DoubleTensor_bernoulli}, -{"squeeze", m_torch_DoubleTensor_squeeze}, -{"sign", m_torch_DoubleTensor_sign}, -{"conv2", m_torch_DoubleTensor_conv2}, -{"xcorr2", m_torch_DoubleTensor_xcorr2}, -{"conv3", m_torch_DoubleTensor_conv3}, -{"xcorr3", m_torch_DoubleTensor_xcorr3}, -{"lt", m_torch_DoubleTensor_lt}, -{"gt", m_torch_DoubleTensor_gt}, -{"le", m_torch_DoubleTensor_le}, -{"ge", m_torch_DoubleTensor_ge}, -{"eq", m_torch_DoubleTensor_eq}, -{"ne", m_torch_DoubleTensor_ne}, -{"nonzero", m_torch_DoubleTensor_nonzero}, -{"mean", m_torch_DoubleTensor_mean}, -{"var", m_torch_DoubleTensor_var}, -{"std", m_torch_DoubleTensor_std}, -{"histc", m_torch_DoubleTensor_histc}, -{"bhistc", m_torch_DoubleTensor_bhistc}, -{"norm", m_torch_DoubleTensor_norm}, -{"renorm", m_torch_DoubleTensor_renorm}, -{"dist", m_torch_DoubleTensor_dist}, -{"linspace", m_torch_DoubleTensor_linspace}, -{"logspace", 
m_torch_DoubleTensor_logspace}, -{"log", m_torch_DoubleTensor_log}, -{"log1p", m_torch_DoubleTensor_log1p}, -{"exp", m_torch_DoubleTensor_exp}, -{"cos", m_torch_DoubleTensor_cos}, -{"acos", m_torch_DoubleTensor_acos}, -{"cosh", m_torch_DoubleTensor_cosh}, -{"sin", m_torch_DoubleTensor_sin}, -{"asin", m_torch_DoubleTensor_asin}, -{"sinh", m_torch_DoubleTensor_sinh}, -{"tan", m_torch_DoubleTensor_tan}, -{"atan", m_torch_DoubleTensor_atan}, -{"tanh", m_torch_DoubleTensor_tanh}, -{"sqrt", m_torch_DoubleTensor_sqrt}, -{"round", m_torch_DoubleTensor_round}, -{"ceil", m_torch_DoubleTensor_ceil}, -{"floor", m_torch_DoubleTensor_floor}, -{"trunc", m_torch_DoubleTensor_trunc}, -{"abs", m_torch_DoubleTensor_abs}, -{"frac", m_torch_DoubleTensor_frac}, -{"rsqrt", m_torch_DoubleTensor_rsqrt}, -{"sigmoid", m_torch_DoubleTensor_sigmoid}, -{"neg", m_torch_DoubleTensor_neg}, -{"cinv", m_torch_DoubleTensor_cinv}, -{"lerp", m_torch_DoubleTensor_lerp}, -{"atan2", m_torch_DoubleTensor_atan2}, -{"pow", m_torch_DoubleTensor_pow}, -{"rand", m_torch_DoubleTensor_rand}, -{"randn", m_torch_DoubleTensor_randn}, -{"multinomial", m_torch_DoubleTensor_multinomial}, -{"uniform", m_torch_DoubleTensor_uniform}, -{"normal", m_torch_DoubleTensor_normal}, -{"cauchy", m_torch_DoubleTensor_cauchy}, -{"logNormal", m_torch_DoubleTensor_logNormal}, -{"exponential", m_torch_DoubleTensor_exponential}, -{NULL, NULL} -}; - -static const struct luaL_Reg torch_DoubleTensorMath__ [] = { -{"zero", torch_DoubleTensor_zero}, -{"fill", torch_DoubleTensor_fill}, -{"zeros", torch_DoubleTensor_zeros}, -{"ones", torch_DoubleTensor_ones}, -{"reshape", torch_DoubleTensor_reshape}, -{"gather", torch_DoubleTensor_gather}, -{"scatter", torch_DoubleTensor_scatter}, -{"dot", torch_DoubleTensor_dot}, -{"equal", torch_DoubleTensor_equal}, -{"add", torch_DoubleTensor_add}, -{"csub", torch_DoubleTensor_csub}, -{"mul", torch_DoubleTensor_mul}, -{"div", torch_DoubleTensor_div}, -{"lshift", torch_DoubleTensor_lshift}, -{"rshift", torch_DoubleTensor_rshift}, -{"fmod", torch_DoubleTensor_fmod}, -{"remainder", torch_DoubleTensor_remainder}, -{"bitand", torch_DoubleTensor_bitand}, -{"bitor", torch_DoubleTensor_bitor}, -{"bitxor", torch_DoubleTensor_bitxor}, -{"mod", torch_DoubleTensor_mod}, -{"clamp", torch_DoubleTensor_clamp}, -{"match", torch_DoubleTensor_match}, -{"cmul", torch_DoubleTensor_cmul}, -{"cpow", torch_DoubleTensor_cpow}, -{"cdiv", torch_DoubleTensor_cdiv}, -{"clshift", torch_DoubleTensor_clshift}, -{"crshift", torch_DoubleTensor_crshift}, -{"cfmod", torch_DoubleTensor_cfmod}, -{"cremainder", torch_DoubleTensor_cremainder}, -{"cbitand", torch_DoubleTensor_cbitand}, -{"cbitor", torch_DoubleTensor_cbitor}, -{"cbitxor", torch_DoubleTensor_cbitxor}, -{"cmod", torch_DoubleTensor_cmod}, -{"addcmul", torch_DoubleTensor_addcmul}, -{"addcdiv", torch_DoubleTensor_addcdiv}, -{"mv", torch_DoubleTensor_mv}, -{"mm", torch_DoubleTensor_mm}, -{"bmm", torch_DoubleTensor_bmm}, -{"ger", torch_DoubleTensor_ger}, -{"addmv", torch_DoubleTensor_addmv}, -{"addmm", torch_DoubleTensor_addmm}, -{"addr", torch_DoubleTensor_addr}, -{"addbmm", torch_DoubleTensor_addbmm}, -{"baddbmm", torch_DoubleTensor_baddbmm}, -{"numel", torch_DoubleTensor_numel}, -{"cumsum", torch_DoubleTensor_cumsum}, -{"cumprod", torch_DoubleTensor_cumprod}, -{"sum", torch_DoubleTensor_sum}, -{"prod", torch_DoubleTensor_prod}, -{"min", torch_DoubleTensor_min}, -{"max", torch_DoubleTensor_max}, -{"cmin", torch_DoubleTensor_cmin}, -{"cmax", torch_DoubleTensor_cmax}, -{"trace", torch_DoubleTensor_trace}, 
-{"cross", torch_DoubleTensor_cross}, -{"diag", torch_DoubleTensor_diag}, -{"eye", torch_DoubleTensor_eye}, -{"range", torch_DoubleTensor_range}, -{"randperm", torch_DoubleTensor_randperm}, -{"sort", torch_DoubleTensor_sort}, -{"topk", torch_DoubleTensor_topk}, -{"kthvalue", torch_DoubleTensor_kthvalue}, -{"mode", torch_DoubleTensor_mode}, -{"median", torch_DoubleTensor_median}, -{"tril", torch_DoubleTensor_tril}, -{"triu", torch_DoubleTensor_triu}, -{"cat", torch_DoubleTensor_cat}, -{"random", torch_DoubleTensor_random}, -{"geometric", torch_DoubleTensor_geometric}, -{"bernoulli", torch_DoubleTensor_bernoulli}, -{"squeeze", torch_DoubleTensor_squeeze}, -{"sign", torch_DoubleTensor_sign}, -{"conv2", torch_DoubleTensor_conv2}, -{"xcorr2", torch_DoubleTensor_xcorr2}, -{"conv3", torch_DoubleTensor_conv3}, -{"xcorr3", torch_DoubleTensor_xcorr3}, -{"lt", torch_DoubleTensor_lt}, -{"gt", torch_DoubleTensor_gt}, -{"le", torch_DoubleTensor_le}, -{"ge", torch_DoubleTensor_ge}, -{"eq", torch_DoubleTensor_eq}, -{"ne", torch_DoubleTensor_ne}, -{"nonzero", torch_DoubleTensor_nonzero}, -{"mean", torch_DoubleTensor_mean}, -{"var", torch_DoubleTensor_var}, -{"std", torch_DoubleTensor_std}, -{"histc", torch_DoubleTensor_histc}, -{"bhistc", torch_DoubleTensor_bhistc}, -{"norm", torch_DoubleTensor_norm}, -{"renorm", torch_DoubleTensor_renorm}, -{"dist", torch_DoubleTensor_dist}, -{"linspace", torch_DoubleTensor_linspace}, -{"logspace", torch_DoubleTensor_logspace}, -{"log", torch_DoubleTensor_log}, -{"log1p", torch_DoubleTensor_log1p}, -{"exp", torch_DoubleTensor_exp}, -{"cos", torch_DoubleTensor_cos}, -{"acos", torch_DoubleTensor_acos}, -{"cosh", torch_DoubleTensor_cosh}, -{"sin", torch_DoubleTensor_sin}, -{"asin", torch_DoubleTensor_asin}, -{"sinh", torch_DoubleTensor_sinh}, -{"tan", torch_DoubleTensor_tan}, -{"atan", torch_DoubleTensor_atan}, -{"tanh", torch_DoubleTensor_tanh}, -{"sqrt", torch_DoubleTensor_sqrt}, -{"round", torch_DoubleTensor_round}, -{"ceil", torch_DoubleTensor_ceil}, -{"floor", torch_DoubleTensor_floor}, -{"trunc", torch_DoubleTensor_trunc}, -{"abs", torch_DoubleTensor_abs}, -{"frac", torch_DoubleTensor_frac}, -{"rsqrt", torch_DoubleTensor_rsqrt}, -{"sigmoid", torch_DoubleTensor_sigmoid}, -{"neg", torch_DoubleTensor_neg}, -{"cinv", torch_DoubleTensor_cinv}, -{"lerp", torch_DoubleTensor_lerp}, -{"atan2", torch_DoubleTensor_atan2}, -{"pow", torch_DoubleTensor_pow}, -{"rand", torch_DoubleTensor_rand}, -{"randn", torch_DoubleTensor_randn}, -{"multinomial", torch_DoubleTensor_multinomial}, -{"uniform", torch_DoubleTensor_uniform}, -{"normal", torch_DoubleTensor_normal}, -{"cauchy", torch_DoubleTensor_cauchy}, -{"logNormal", torch_DoubleTensor_logNormal}, -{"exponential", torch_DoubleTensor_exponential}, -{"gesv", torch_DoubleTensor_gesv}, -{"gels", torch_DoubleTensor_gels}, -{"trtrs", torch_DoubleTensor_trtrs}, -{"symeig", torch_DoubleTensor_symeig}, -{"eig", torch_DoubleTensor_eig}, -{"svd", torch_DoubleTensor_svd}, -{"inverse", torch_DoubleTensor_inverse}, -{"potrf", torch_DoubleTensor_potrf}, -{"potrs", torch_DoubleTensor_potrs}, -{"potri", torch_DoubleTensor_potri}, -{"pstrf", torch_DoubleTensor_pstrf}, -{"qr", torch_DoubleTensor_qr}, -{"geqrf", torch_DoubleTensor_geqrf}, -{"orgqr", torch_DoubleTensor_orgqr}, -{"ormqr", torch_DoubleTensor_ormqr}, -{NULL, NULL} -}; - -static void torch_DoubleTensorMath_init(lua_State *L) -{ - luaT_pushmetatable(L, "torch.DoubleTensor"); - - /* register methods */ - luaT_setfuncs(L, m_torch_DoubleTensorMath__, 0); - - /* register functions into the "torch" 
field of the tensor metaclass */ - lua_pushstring(L, "torch"); - lua_newtable(L); - luaT_setfuncs(L, torch_DoubleTensorMath__, 0); - lua_rawset(L, -3); - lua_pop(L, 1); -} - -static const struct luaL_Reg torch_TensorMath__ [] = { -{"zero", torch_zero}, -{"fill", torch_fill}, -{"zeros", torch_zeros}, -{"ones", torch_ones}, -{"reshape", torch_reshape}, -{"gather", torch_gather}, -{"scatter", torch_scatter}, -{"dot", torch_dot}, -{"equal", torch_equal}, -{"add", torch_add}, -{"csub", torch_csub}, -{"mul", torch_mul}, -{"div", torch_div}, -{"lshift", torch_lshift}, -{"rshift", torch_rshift}, -{"fmod", torch_fmod}, -{"remainder", torch_remainder}, -{"bitand", torch_bitand}, -{"bitor", torch_bitor}, -{"bitxor", torch_bitxor}, -{"mod", torch_mod}, -{"clamp", torch_clamp}, -{"match", torch_match}, -{"cmul", torch_cmul}, -{"cpow", torch_cpow}, -{"cdiv", torch_cdiv}, -{"clshift", torch_clshift}, -{"crshift", torch_crshift}, -{"cfmod", torch_cfmod}, -{"cremainder", torch_cremainder}, -{"cbitand", torch_cbitand}, -{"cbitor", torch_cbitor}, -{"cbitxor", torch_cbitxor}, -{"cmod", torch_cmod}, -{"addcmul", torch_addcmul}, -{"addcdiv", torch_addcdiv}, -{"mv", torch_mv}, -{"mm", torch_mm}, -{"bmm", torch_bmm}, -{"ger", torch_ger}, -{"addmv", torch_addmv}, -{"addmm", torch_addmm}, -{"addr", torch_addr}, -{"addbmm", torch_addbmm}, -{"baddbmm", torch_baddbmm}, -{"numel", torch_numel}, -{"cumsum", torch_cumsum}, -{"cumprod", torch_cumprod}, -{"sum", torch_sum}, -{"prod", torch_prod}, -{"min", torch_min}, -{"max", torch_max}, -{"cmin", torch_cmin}, -{"cmax", torch_cmax}, -{"trace", torch_trace}, -{"cross", torch_cross}, -{"diag", torch_diag}, -{"eye", torch_eye}, -{"range", torch_range}, -{"randperm", torch_randperm}, -{"sort", torch_sort}, -{"topk", torch_topk}, -{"kthvalue", torch_kthvalue}, -{"mode", torch_mode}, -{"median", torch_median}, -{"tril", torch_tril}, -{"triu", torch_triu}, -{"cat", torch_cat}, -{"random", torch_random}, -{"geometric", torch_geometric}, -{"bernoulli", torch_bernoulli}, -{"squeeze", torch_squeeze}, -{"sign", torch_sign}, -{"conv2", torch_conv2}, -{"xcorr2", torch_xcorr2}, -{"conv3", torch_conv3}, -{"xcorr3", torch_xcorr3}, -{"lt", torch_lt}, -{"gt", torch_gt}, -{"le", torch_le}, -{"ge", torch_ge}, -{"eq", torch_eq}, -{"ne", torch_ne}, -{"nonzero", torch_nonzero}, -{"all", torch_all}, -{"any", torch_any}, -{"abs", torch_abs}, -{"mean", torch_mean}, -{"var", torch_var}, -{"std", torch_std}, -{"histc", torch_histc}, -{"bhistc", torch_bhistc}, -{"norm", torch_norm}, -{"renorm", torch_renorm}, -{"dist", torch_dist}, -{"linspace", torch_linspace}, -{"logspace", torch_logspace}, -{"log", torch_log}, -{"log1p", torch_log1p}, -{"exp", torch_exp}, -{"cos", torch_cos}, -{"acos", torch_acos}, -{"cosh", torch_cosh}, -{"sin", torch_sin}, -{"asin", torch_asin}, -{"sinh", torch_sinh}, -{"tan", torch_tan}, -{"atan", torch_atan}, -{"tanh", torch_tanh}, -{"sqrt", torch_sqrt}, -{"round", torch_round}, -{"ceil", torch_ceil}, -{"floor", torch_floor}, -{"trunc", torch_trunc}, -{"frac", torch_frac}, -{"rsqrt", torch_rsqrt}, -{"sigmoid", torch_sigmoid}, -{"neg", torch_neg}, -{"cinv", torch_cinv}, -{"lerp", torch_lerp}, -{"atan2", torch_atan2}, -{"pow", torch_pow}, -{"rand", torch_rand}, -{"randn", torch_randn}, -{"multinomial", torch_multinomial}, -{"uniform", torch_uniform}, -{"normal", torch_normal}, -{"cauchy", torch_cauchy}, -{"logNormal", torch_logNormal}, -{"exponential", torch_exponential}, -{"gesv", torch_gesv}, -{"gels", torch_gels}, -{"trtrs", torch_trtrs}, -{"symeig", torch_symeig}, -{"eig", 
torch_eig}, -{"svd", torch_svd}, -{"inverse", torch_inverse}, -{"potrf", torch_potrf}, -{"potrs", torch_potrs}, -{"potri", torch_potri}, -{"pstrf", torch_pstrf}, -{"qr", torch_qr}, -{"geqrf", torch_geqrf}, -{"orgqr", torch_orgqr}, -{"ormqr", torch_ormqr}, -{NULL, NULL} -}; - -void torch_TensorMath_init(lua_State *L) -{ - torch_ByteTensorMath_init(L); - torch_CharTensorMath_init(L); - torch_ShortTensorMath_init(L); - torch_IntTensorMath_init(L); - torch_LongTensorMath_init(L); - torch_FloatTensorMath_init(L); - torch_DoubleTensorMath_init(L); - luaT_setfuncs(L, torch_TensorMath__, 0); -} diff --git a/contrib/lua-torch/torch7/TensorMath.lua b/contrib/lua-torch/torch7/TensorMath.lua deleted file mode 100644 index 45e07c63e9..0000000000 --- a/contrib/lua-torch/torch7/TensorMath.lua +++ /dev/null @@ -1,1527 +0,0 @@ -local wrap = require 'cwrap' - -require 'torchcwrap' - -local interface = wrap.CInterface.new() -local method = wrap.CInterface.new() -local argtypes = wrap.CInterface.argtypes - -argtypes['ptrdiff_t'] = wrap.types.ptrdiff_t - -interface:print([[ -#include "TH.h" -#include "THMath.h" -#include "luaT.h" -#include "utils.h" -]]) - --- specific to torch: we generate a 'dispatch' function --- first we create a helper function --- note that it let the "torch" table on the stack -interface:print([[ -static const void* torch_istensortype(lua_State *L, const char *tname) -{ - if(!tname) - return NULL; - - if(!luaT_pushmetatable(L, tname)) - return NULL; - - lua_pushstring(L, "torch"); - lua_rawget(L, -2); - if(lua_istable(L, -1)) - return tname; - else - { - lua_pop(L, 2); - return NULL; - } - - return NULL; -} -]]) - -interface:print([[ -static int torch_isnonemptytable(lua_State *L, int idx) -{ - int empty; - if (!lua_istable(L, idx)) return 0; - - lua_rawgeti(L, idx, 1); - empty = lua_isnil(L, -1); - lua_pop(L, 1); - return !empty; -} -]]) - - -interface:print([[ -static const void* torch_istensorarray(lua_State *L, int idx) -{ - const char* tname; - int tensor_idx; - if (!torch_isnonemptytable(L, idx)) return 0; - - lua_checkstack(L, 3); - lua_rawgeti(L, idx, 1); - tensor_idx = lua_gettop(L); - tname = (torch_istensortype(L, luaT_typename(L, -1))); - lua_remove(L, tensor_idx); - return tname; -} -]]) - -interface.dispatchregistry = {} -function interface:wrap(name, ...) - -- usual stuff - wrap.CInterface.wrap(self, name, ...) - - -- dispatch function - if not interface.dispatchregistry[name] then - interface.dispatchregistry[name] = true - table.insert(interface.dispatchregistry, {name=name, wrapname=string.format("torch_%s", name)}) - - interface:print(string.gsub([[ -static int torch_NAME(lua_State *L) -{ - int narg = lua_gettop(L); - const void *tname; - if(narg >= 1 && (tname = torch_istensortype(L, luaT_typename(L, 1)))) /* first argument is tensor? */ - { - } - else if(narg >= 2 && (tname = torch_istensortype(L, luaT_typename(L, 2)))) /* second? */ - { - } - else if(narg >= 1 && (tname = torch_istensorarray(L, 1))) /* torch table argument? */ - { - } - else if(narg >= 1 && lua_type(L, narg) == LUA_TSTRING - && (tname = torch_istensortype(L, lua_tostring(L, narg)))) /* do we have a valid tensor type string then? 
*/ - { - lua_remove(L, -2); - } - else if(!(tname = torch_istensortype(L, torch_getdefaulttensortype(L)))) - luaL_error(L, "internal error: the default tensor type does not seem to be an actual tensor"); - - lua_pushstring(L, "NAME"); - lua_rawget(L, -2); - if(lua_isfunction(L, -1)) - { - lua_insert(L, 1); - lua_pop(L, 2); /* the two tables we put on the stack above */ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - } - else - return luaL_error(L, "%s does not implement the torch.NAME() function", tname); - - return lua_gettop(L); -} -]], 'NAME', name)) - end -end - -function interface:dispatchregister(name) - local txt = self.txt - table.insert(txt, string.format('static const struct luaL_Reg %s [] = {', name)) - for _,reg in ipairs(self.dispatchregistry) do - table.insert(txt, string.format('{"%s", %s},', reg.name, reg.wrapname)) - end - table.insert(txt, '{NULL, NULL}') - table.insert(txt, '};') - table.insert(txt, '') - self.dispatchregistry = {} -end - -interface:print('/* WARNING: autogenerated file */') -interface:print('') - -local function wrap(...) - local args = {...} - - -- interface - interface:wrap(...) - - -- method: we override things possibly in method table field - for _,x in ipairs(args) do - if type(x) == 'table' then -- ok, now we have a list of args - for _, arg in ipairs(x) do - if arg.method then - for k,v in pairs(arg.method) do - if v == 'nil' then -- special case, we erase the field - arg[k] = nil - else - arg[k] = v - end - end - end - end - end - end - local unpack = unpack or table.unpack - method:wrap(unpack(args)) -end - -local reals = {ByteTensor='unsigned char', - CharTensor='char', - ShortTensor='short', - IntTensor='int', - LongTensor='long', - FloatTensor='float', - HalfTensor='half', - DoubleTensor='double'} - -local accreals = {ByteTensor='long', - CharTensor='long', - ShortTensor='long', - IntTensor='long', - LongTensor='long', - FloatTensor='double', - HalfTensor='float', - DoubleTensor='double'} - -for _,Tensor in ipairs({"ByteTensor", "CharTensor", - "ShortTensor", "IntTensor", "LongTensor", - "FloatTensor", "HalfTensor", "DoubleTensor"}) do - - local real = reals[Tensor] - local accreal = accreals[Tensor] - - function interface.luaname2wrapname(self, name) - return string.format('torch_%s_%s', Tensor, name) - end - - function method.luaname2wrapname(self, name) - return string.format('m_torch_%s_%s', Tensor, name) - end - - local function cname(name) - return string.format('TH%s_%s', Tensor, name) - end - - local function lastdim(argn) - return function(arg) - return string.format("TH%s_nDimension(%s)", Tensor, arg.args[argn]:carg()) - end - end - - local function lastdimarray(argn) - return function(arg) - return string.format("TH%s_nDimension(arg%d_data[0])", Tensor, arg.args[argn].i) - end - end - - if Tensor ~= 'HalfTensor' then - wrap("zero", - cname("zero"), - {{name=Tensor, returned=true}}) - - wrap("fill", - cname("fill"), - {{name=Tensor, returned=true}, - {name=real}}) - - wrap("zeros", - cname("zeros"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name="LongArg"}}) - - wrap("ones", - cname("ones"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name="LongArg"}}) - - wrap("reshape", - cname("reshape"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name="LongArg"}}) - - wrap("gather", - cname("gather"), - {{name=Tensor, default=true, returned=true, - init=function(arg) - return table.concat( - { - arg.__metatable.init(arg), - string.format("THLongStorage* %s_size 
= THLongTensor_newSizeOf(%s);", arg:carg(), arg.args[4]:carg()), - string.format("TH%s_resize(%s, %s_size, NULL);", Tensor, arg:carg(), arg:carg()), - string.format("THLongStorage_free(%s_size);", arg:carg()) - }, '\n') - end - }, - {name=Tensor}, - {name="index"}, - {name="IndexTensor", noreadadd=true}}) - - wrap("scatter", - cname("scatter"), - {{name=Tensor, returned=true}, - {name="index"}, - {name="IndexTensor", noreadadd=true}, - {name=Tensor}}, - cname("scatterFill"), - {{name=Tensor, returned=true}, - {name="index"}, - {name="IndexTensor", noreadadd=true}, - {name=real}}) - - wrap("dot", - cname("dot"), - {{name=Tensor}, - {name=Tensor}, - {name=accreal, creturned=true}}) - - wrap("equal", - cname("equal"), - {{name=Tensor}, - {name=Tensor}, - {name="boolean", creturned=true}}) - - wrap("add", - cname("add"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}, - cname("cadd"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real, default=1}, - {name=Tensor}}) - - wrap("csub", - cname("sub"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}, - cname("csub"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real, default=1}, - {name=Tensor}}) - - wrap("mul", - cname("mul"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}) - - wrap("div", - cname("div"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}) - - wrap("lshift", - cname("lshift"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}) - - wrap("rshift", - cname("rshift"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}) - - wrap("fmod", - cname("fmod"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}) - - wrap("remainder", - cname("remainder"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}) - - wrap("bitand", - cname("bitand"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}) - - wrap("bitor", - cname("bitor"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}) - - wrap("bitxor", - cname("bitxor"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}) - - -- mod alias - wrap("mod", - cname("fmod"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}) - - wrap("clamp", - cname("clamp"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}, - {name=real}}) - - - wrap("match", - cname("match"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor}, - {name=Tensor}, - {name=real, default=1} - }) - - wrap("cmul", - cname("cmul"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, 
method={default=1}}, - {name=Tensor}}) - - wrap("cpow", - cname("cpow"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}}) - - wrap("cdiv", - cname("cdiv"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}}) - - wrap("clshift", - cname("clshift"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}}) - - wrap("crshift", - cname("crshift"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}}) - - wrap("cfmod", - cname("cfmod"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}}) - - wrap("cremainder", - cname("cremainder"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}}) - - wrap("cbitand", - cname("cbitand"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}}) - - wrap("cbitor", - cname("cbitor"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}}) - - wrap("cbitxor", - cname("cbitxor"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}}) - - -- cmod alias - wrap("cmod", - cname("cfmod"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}}) - - wrap("addcmul", - cname("addcmul"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real, default=1}, - {name=Tensor}, - {name=Tensor}}) - - wrap("addcdiv", - cname("addcdiv"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real, default=1}, - {name=Tensor}, - {name=Tensor}}) - - wrap("mv", - cname("addmv"), - {{name=Tensor, default=true, returned=true, method={default='nil'}, - init=function(arg) - return table.concat( - { - arg.__metatable.init(arg), - string.format("TH%s_resize1d(%s, %s->size[0]);", Tensor, arg:carg(), arg.args[5]:carg()) - }, '\n') - end, - precall=function(arg) - return table.concat( - { - string.format("TH%s_zero(%s);", Tensor, arg:carg()), - arg.__metatable.precall(arg) - }, '\n') - end, - }, - {name=real, default=0, invisible=true}, - {name=Tensor, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=2}, - {name=Tensor, dim=1}} - ) - - wrap("mm", - cname("addmm"), - {{name=Tensor, default=true, returned=true, method={default='nil'}, - init=function(arg) - return table.concat( - { - arg.__metatable.init(arg), - string.format("TH%s_resize2d(%s, %s->size[0], %s->size[1]);", Tensor, arg:carg(), arg.args[5]:carg(), arg.args[6]:carg()) - }, '\n') - end, - precall=function(arg) - return table.concat( - { - string.format("TH%s_zero(%s);", Tensor, arg:carg()), - arg.__metatable.precall(arg) - }, '\n') - end, - }, - {name=real, default=0, invisible=true}, - {name=Tensor, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=2}, - {name=Tensor, dim=2}} - ) - - wrap("bmm", - cname("baddbmm"), - {{name=Tensor, default=true, returned=true, method={default='nil'}, - init=function(arg) - return 
table.concat( - { - arg.__metatable.init(arg), - string.format("TH%s_resize3d(%s, %s->size[0], %s->size[1], %s->size[2]);", - Tensor, arg:carg(), arg.args[5]:carg(), arg.args[5]:carg(), arg.args[6]:carg()) - }, '\n') - end, - precall=function(arg) - return table.concat( - { - string.format("TH%s_zero(%s);", Tensor, arg:carg()), - arg.__metatable.precall(arg) - }, '\n') - end, - }, - {name=real, default=0, invisible=true}, - {name=Tensor, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=3}, - {name=Tensor, dim=3}} - ) - - wrap("ger", - cname("addr"), - {{name=Tensor, default=true, returned=true, method={default='nil'}, - init=function(arg) - return table.concat( - { - arg.__metatable.init(arg), - string.format("TH%s_resize2d(%s, %s->size[0], %s->size[0]);", Tensor, arg:carg(), arg.args[5]:carg(), arg.args[6]:carg()) - }, '\n') - end, - precall=function(arg) - return table.concat( - { - string.format("TH%s_zero(%s);", Tensor, arg:carg()), - arg.__metatable.precall(arg) - }, '\n') - end - }, - {name=real, default=1, invisible=true}, - {name=Tensor, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=1}, - {name=Tensor, dim=1}} - ) - - for _,f in ipairs({ - {name="addmv", dim1=1, dim2=2, dim3=1}, - {name="addmm", dim1=2, dim2=2, dim3=2}, - {name="addr", dim1=2, dim2=1, dim3=1}, - {name="addbmm", dim1=2, dim2=3, dim3=3}, - {name="baddbmm", dim1=3, dim2=3, dim3=3}, - } - ) do - - interface:wrap(f.name, - cname(f.name), - {{name=Tensor, default=true, returned=true}, - {name=real, default=1}, - {name=Tensor, dim=f.dim1}, - {name=real, default=1}, - {name=Tensor, dim=f.dim2}, - {name=Tensor, dim=f.dim3}}) - - -- there is an ambiguity here, hence the more complicated setup - method:wrap(f.name, - cname(f.name), - {{name=Tensor, returned=true, dim=f.dim1}, - {name=real, default=1, invisible=true}, - {name=Tensor, default=1, dim=f.dim1}, - {name=real, default=1}, - {name=Tensor, dim=f.dim2}, - {name=Tensor, dim=f.dim3}}, - cname(f.name), - {{name=Tensor, returned=true, dim=f.dim1}, - {name=real}, - {name=Tensor, default=1, dim=f.dim1}, - {name=real}, - {name=Tensor, dim=f.dim2}, - {name=Tensor, dim=f.dim3}}) - end - - wrap("numel", - cname("numel"), - {{name=Tensor}, - {name="ptrdiff_t", creturned=true}}) - - for _,name in ipairs({"cumsum", "cumprod"}) do - wrap(name, - cname(name), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name="index", default=1}}) - end - - wrap("sum", - cname("sumall"), - {{name=Tensor}, - {name=accreal, creturned=true}}, - cname("sum"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name="index"}, - {name="boolean", default=true, invisible=true}}) - - wrap("prod", - cname("prodall"), - {{name=Tensor}, - {name=accreal, creturned=true}}, - cname("prod"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name="index"}, - {name="boolean", default=true, invisible=true}}) - - for _,name in ipairs({"min", "max"}) do - wrap(name, - cname(name .. "all"), - {{name=Tensor}, - {name=real, creturned=true}}, - cname(name), - {{name=Tensor, default=true, returned=true}, - {name="IndexTensor", default=true, returned=true, noreadadd=true}, - {name=Tensor}, - {name="index"}, - {name="boolean", default=true, invisible=true}}) - end - - for _,name in ipairs({"cmin", "cmax"}) do - wrap(name, - cname(name), - {{name=Tensor, default=true, returned=true}, - {name=Tensor, method={default=1}}, - {name=Tensor}}, - cname(name .. 
"Value"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor, method={default=1}}, - {name=real}}) - end - - wrap("trace", - cname("trace"), - {{name=Tensor}, - {name=accreal, creturned=true}}) - - wrap("cross", - cname("cross"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name=Tensor}, - {name="index", default=0}}) - - wrap("diag", - cname("diag"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name="long", default=0}}) - - wrap("eye", - cname("eye"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name="long"}, - {name="long", default=0}}) - - wrap("range", - cname("range"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=accreal}, - {name=accreal}, - {name=accreal, default=1}}) - - wrap("randperm", - cname("randperm"), - {{name=Tensor, default=true, returned=true, method={default='nil'}, - postcall=function(arg) - return table.concat( - { - arg.__metatable.postcall(arg), - string.format("TH%s_add(%s, %s, 1);", Tensor, arg:carg(), arg:carg()) - }, '\n') - end}, - {name="Generator", default=true}, - {name="long"}}) - - wrap("sort", - cname("sort"), - {{name=Tensor, default=true, returned=true}, - {name="IndexTensor", default=true, returned=true, noreadadd=true}, - {name=Tensor}, - {name="index", default=lastdim(3)}, - {name="boolean", default=0}}) - -wrap("topk", - cname("topk"), - {{name=Tensor, default=true, returned=true}, - {name="IndexTensor", default=true, returned=true, noreadadd=true}, - {name=Tensor}, - {name="long", default=1}, - {name="index", default=lastdim(3)}, - {name="boolean", default=0}, - {name="boolean", default=0}}) - - wrap("kthvalue", - cname("kthvalue"), - {{name=Tensor, default=true, returned=true}, - {name="IndexTensor", default=true, returned=true, noreadadd=true}, - {name=Tensor}, - {name="long"}, - {name="index", default=lastdim(3)}, - {name="boolean", default=true, invisible=true}}) - - wrap("mode", - cname("mode"), - {{name=Tensor, default=true, returned=true}, - {name="IndexTensor", default=true, returned=true, noreadadd=true}, - {name=Tensor}, - {name="index", default=lastdim(3)}, - {name="boolean", default=true, invisible=true}}) - - wrap("median", - cname("median"), - {{name=Tensor, default=true, returned=true}, - {name="IndexTensor", default=true, returned=true, noreadadd=true}, - {name=Tensor}, - {name="index", default=lastdim(3)}, - {name="boolean", default=true, invisible=true}}) - - wrap("tril", - cname("tril"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name="int", default=0}}) - - wrap("triu", - cname("triu"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name="int", default=0}}) - - wrap("cat", - cname("cat"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name=Tensor}, - {name="index", default=-1}}, - cname("catArray"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor .. 
"Array"}, - {name="index", default=-1}}) - - if Tensor == 'ByteTensor' then -- we declare this only once - interface:print( - [[ -static long THRandom_random2__(THGenerator *gen, long a, long b) -{ - THArgCheck(b >= a, 2, "upper bound must be larger than lower bound"); - return((THRandom_random(gen) % (b+1-a)) + a); -} - -static long THRandom_random1__(THGenerator *gen, long b) -{ - THArgCheck(b > 0, 1, "upper bound must be strictly positive"); - return(THRandom_random(gen) % b + 1); -} - ]]) - end - - interface:print(string.gsub( - [[ -static void THTensor_random2__(THTensor *self, THGenerator *gen, long a, long b) -{ - THArgCheck(b >= a, 2, "upper bound must be larger than lower bound"); - TH_TENSOR_APPLY(real, self, *self_data = ((THRandom_random(gen) % (b+1-a)) + a);) -} - -static void THTensor_random1__(THTensor *self, THGenerator *gen, long b) -{ - THArgCheck(b > 0, 1, "upper bound must be strictly positive"); - TH_TENSOR_APPLY(real, self, *self_data = (THRandom_random(gen) % b + 1);) -} -]], 'Tensor', Tensor):gsub('real', real)) - - wrap('random', - 'THRandom_random2__', - {{name='Generator', default=true}, - {name='long'}, - {name='long'}, - {name='long', creturned=true}}, - 'THRandom_random1__', - {{name='Generator', default=true}, - {name='long'}, - {name='long', creturned=true}}, - 'THRandom_random', - {{name='Generator', default=true}, - {name='long', creturned=true}}, - cname("random2__"), - {{name=Tensor, returned=true}, - {name='Generator', default=true}, - {name='long'}, - {name='long'}}, - cname("random1__"), - {{name=Tensor, returned=true}, - {name='Generator', default=true}, - {name='long'}}, - cname("random"), - {{name=Tensor, returned=true}, - {name='Generator', default=true}}) - - wrap("geometric", - "THRandom_geometric", - {{name="Generator", default=true}, - {name="double"}, - {name="double", creturned=true}}, - cname("geometric"), - {{name=Tensor, returned=true}, - {name="Generator", default=true}, - {name="double"}}) - - wrap("bernoulli", - "THRandom_bernoulli", - {{name="Generator", default=true}, - {name="double", default=0.5}, - {name="double", creturned=true}}, - cname("bernoulli"), - {{name=Tensor, returned=true}, - {name="Generator", default=true}, - {name="double", default=0.5}}, - cname("bernoulli_FloatTensor"), - {{name=Tensor, returned=true}, - {name="Generator", default=true}, - {name="FloatTensor"}}, - cname("bernoulli_DoubleTensor"), - {{name=Tensor, returned=true}, - {name="Generator", default=true}, - {name="DoubleTensor"}}) - - wrap("squeeze", - cname("squeeze"), - {{name=Tensor, default=true, returned=true, postcall=function(arg) - local txt = {} - if arg.returned then - table.insert(txt, string.format('if(arg%d->nDimension == 1 && arg%d->size[0] == 1)', arg.i, arg.i)) -- number - table.insert(txt, string.format('lua_pushnumber(L, (lua_Number)(*TH%s_data(arg%d)));', Tensor, arg.i)) - end - return table.concat(txt, '\n') - end}, - {name=Tensor}}, - cname("squeeze1d"), - {{name=Tensor, default=true, returned=true, - - postcall= - function(arg) - local txt = {} - if arg.returned then - table.insert(txt, string.format('if(!hasdims && arg%d->nDimension == 1 && arg%d->size[0] == 1)', arg.i, arg.i)) -- number - table.insert(txt, string.format('lua_pushnumber(L, (lua_Number)(*TH%s_data(arg%d)));}', Tensor, arg.i)) - end - return table.concat(txt, '\n') - end}, - - {name=Tensor, - - precall= - function(arg) - return string.format('{int hasdims = arg%d->nDimension > 1;', arg.i) - end}, - - {name="index"}}) - - wrap("sign", - cname("sign"), - {{name=Tensor, 
default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}}) - - wrap("conv2", - cname("conv2Dmul"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=2}, - {name=Tensor, dim=2}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="C", invisible=true}}, - cname("conv2Dcmul"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=3}, - {name=Tensor, dim=3}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="C", invisible=true}}, - cname("conv2Dmv"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=3}, - {name=Tensor, dim=4}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="C", invisible=true}} - ) - - wrap("xcorr2", - cname("conv2Dmul"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=2}, - {name=Tensor, dim=2}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="X", invisible=true}}, - cname("conv2Dcmul"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=3}, - {name=Tensor, dim=3}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="X", invisible=true}}, - cname("conv2Dmv"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=3}, - {name=Tensor, dim=4}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="X", invisible=true}} - ) - - wrap("conv3", - cname("conv3Dmul"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=3}, - {name=Tensor, dim=3}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="C", invisible=true}}, - cname("conv3Dcmul"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=4}, - {name=Tensor, dim=4}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="C", invisible=true}}, - cname("conv3Dmv"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, 
dim=4}, - {name=Tensor, dim=5}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="C", invisible=true}} - ) - - wrap("xcorr3", - cname("conv3Dmul"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=3}, - {name=Tensor, dim=3}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="X", invisible=true}}, - cname("conv3Dcmul"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=4}, - {name=Tensor, dim=4}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="X", invisible=true}}, - cname("conv3Dmv"), - {{name=Tensor, default=true, returned=true}, - {name=real, default=0, invisible=true}, - {name=real, default=1, invisible=true}, - {name=Tensor, dim=4}, - {name=Tensor, dim=5}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name=real, default=1, invisible=true}, - {name='charoption', values={'V', 'F'}, default='V'}, - {name='charoption', default="X", invisible=true}} - ) - - for _,name in pairs({'lt','gt','le','ge','eq','ne'}) do - wrap(name, - cname(name .. 'Value'), - {{name='ByteTensor',default=true, returned=true}, - {name=Tensor}, - {name=real}}, - cname(name .. 'ValueT'), - {{name=Tensor, returned=true}, - {name=Tensor}, - {name=real}}, - cname(name .. 'Tensor'), - {{name='ByteTensor',default=true, returned=true}, - {name=Tensor}, - {name=Tensor}}, - cname(name .. 'TensorT'), - {{name=Tensor, returned=true}, - {name=Tensor}, - {name=Tensor}}) - end - - wrap("nonzero", - cname("nonzero"), - {{name="IndexTensor", default=true, returned=true}, - {name=Tensor}}) - end -- ~= HalfTensor - - if Tensor == 'ByteTensor' then - -- Logical accumulators only apply to ByteTensor - for _,name in ipairs({'all', 'any'}) do - wrap(name, - cname('logical' .. name), - {{name=Tensor}, - {name="boolean", creturned=true}}) - end - end - - if Tensor == 'IntTensor' then - wrap("abs", - cname("abs"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}}, - "abs", - {{name=real}, - {name=real, creturned=true}}) - elseif Tensor == 'LongTensor' then - wrap("abs", - cname("abs"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}}, - "labs", - {{name=real}, - {name=real, creturned=true}}) - end - - if Tensor == 'FloatTensor' or Tensor == 'DoubleTensor' then - - wrap("mean", - cname("meanall"), - {{name=Tensor}, - {name=accreal, creturned=true}}, - cname("mean"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name="index"}, - {name="boolean", default=true, invisible=true}}) - - for _,name in ipairs({"var", "std"}) do - wrap(name, - cname(name .. 
"all"), - {{name=Tensor}, - {name=accreal, creturned=true}}, - cname(name), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name="index"}, - {name="boolean", default=false}, - {name="boolean", default=true, invisible=true}}) - end - wrap("histc", - cname("histc"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name="long",default=100}, - {name="double",default=0}, - {name="double",default=0}}) - - wrap("bhistc", - cname("bhistc"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name="long",default=100}, - {name="double",default=0}, - {name="double",default=0}}) - - wrap("norm", - cname("normall"), - {{name=Tensor}, - {name=real, default=2}, - {name=accreal, creturned=true}}, - cname("norm"), - {{name=Tensor, default=true, returned=true}, - {name=Tensor}, - {name=real}, - {name="index"}, - {name="boolean", default=true, invisible=true}}) - - wrap("renorm", - cname("renorm"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}, - {name="index"}, - {name=real}}) - - wrap("dist", - cname("dist"), - {{name=Tensor}, - {name=Tensor}, - {name=real, default=2}, - {name=accreal, creturned=true}}) - - wrap("linspace", - cname("linspace"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=real}, - {name=real}, - {name="long", default=100}}) - - wrap("logspace", - cname("logspace"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=real}, - {name=real}, - {name="long", default=100}}) - - for _,name in ipairs({"log", "log1p", "exp", - "cos", "acos", "cosh", - "sin", "asin", "sinh", - "tan", "atan", "tanh", - "sqrt", "round", "ceil", - "floor", "trunc", }) do - wrap(name, - cname(name), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}}, - name, - {{name=real}, - {name=real, creturned=true}}) - end - - wrap("abs", - cname("abs"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}}, - "fabs", - {{name=real}, - {name=real, creturned=true}}) - - wrap("frac", - cname("frac"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}}, - "TH_frac", - {{name=real}, - {name=real, creturned=true}}) - - wrap("rsqrt", - cname("rsqrt"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}}, - "TH_rsqrt", - {{name=real}, - {name=real, creturned=true}}) - - wrap("sigmoid", - cname("sigmoid"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}}, - "TH_sigmoid", - {{name=real}, - {name=real, creturned=true}}) - - wrap("neg", - cname("neg"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}}) - - wrap("cinv", - cname("cinv"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}}) - - wrap("lerp", - cname("lerp"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}, - {name=real}}, - "TH_lerp", - {{name=real}, - {name=real}, - {name=real}, - {name=real, creturned=true}}) - - wrap("atan2", - cname("atan2"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=Tensor}}, - "atan2", - {{name=real}, - {name=real}, - {name=real, 
creturned=true}}) - - wrap("pow", - cname("pow"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=Tensor, method={default=1}}, - {name=real}}, - cname("tpow"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name=real}, - {name=Tensor, method={default=1}}}, - "pow", - {{name=real}, - {name=real}, - {name=real, creturned=true}}) - - wrap("rand", - cname("rand"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name='Generator', default=true}, - {name="LongArg"}}) - - wrap("randn", - cname("randn"), - {{name=Tensor, default=true, returned=true, method={default='nil'}}, - {name='Generator', default=true}, - {name="LongArg"}}) - - wrap("multinomial", - cname("multinomial"), - {{name="IndexTensor", default=true, returned=true, method={default='nil'}}, - {name='Generator', default=true}, - {name=Tensor}, - {name="int"}, - {name="boolean", default=false}}) - - for _,f in ipairs({{name='uniform', a=0, b=1}, - {name='normal', a=0, b=1}, - {name='cauchy', a=0, b=1}, - {name='logNormal', a=1, b=2}}) do - - wrap(f.name, - string.format("THRandom_%s", f.name), - {{name='Generator', default=true}, - {name="double", default=f.a}, - {name="double", default=f.b}, - {name="double", creturned=true}}, - cname(f.name), - {{name=Tensor, returned=true}, - {name='Generator', default=true}, - {name=real, default=f.a}, - {name=real, default=f.b}}) - end - - for _,f in ipairs({{name='exponential'}}) do - - wrap(f.name, - string.format("THRandom_%s", f.name), - {{name='Generator', default=true}, - {name="double", default=f.a}, - {name="double", creturned=true}}, - cname(f.name), - {{name=Tensor, returned=true}, - {name='Generator', default=true}, - {name=real, default=f.a}}) - end - - for _,name in ipairs({"gesv","gels"}) do - interface:wrap(name, - cname(name), - {{name=Tensor, returned=true}, - {name=Tensor, returned=true}, - {name=Tensor}, - {name=Tensor}}, - cname(name), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}, - {name=Tensor}} - ) - end - interface:wrap("trtrs", - cname("trtrs"), - {{name=Tensor, returned=true}, - {name=Tensor, returned=true}, - {name=Tensor}, - {name=Tensor}, - {name='charoption', values={'U', 'L'}, default='U'}, -- uplo - {name='charoption', values={'N', 'T'}, default='N'}, -- trans - {name='charoption', values={'N', 'U'}, default='N'}}, -- diag - cname("trtrs"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}, - {name=Tensor}, - {name='charoption', values={'U', 'L'}, default='U'}, -- uplo - {name='charoption', values={'N', 'T'}, default='N'}, -- trans - {name='charoption', values={'N', 'U'}, default='N'}} -- diag - ) - - interface:wrap("symeig", - cname("syev"), - {{name=Tensor, returned=true}, - {name=Tensor, returned=true}, - {name=Tensor}, - {name='charoption', values={'N', 'V'}, default='N'}, - {name='charoption', values={'U', 'L'}, default='U'}}, - cname("syev"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}, - {name='charoption', values={'N', 'V'}, default='N'}, - {name='charoption', values={'U', 'L'}, default='U'}} - ) - interface:wrap("eig", - cname("geev"), - {{name=Tensor, returned=true}, - {name=Tensor, returned=true}, - {name=Tensor}, - {name='charoption', values={'N', 'V'}, default='N'}}, - cname("geev"), - 
{{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}, - {name='charoption', values={'N', 'V'}, default='N'}} - ) - - interface:wrap("svd", - cname("gesvd"), - {{name=Tensor, returned=true}, - {name=Tensor, returned=true}, - {name=Tensor, returned=true}, - {name=Tensor}, - {name='charoption', values={'A', 'S'}, default='S'}}, - cname("gesvd"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}, - {name='charoption', values={'A', 'S'}, default='S'}} - ) - interface:wrap("inverse", - cname("getri"), - {{name=Tensor, returned=true}, - {name=Tensor}}, - cname("getri"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}} - ) - interface:wrap("potrf", - cname("potrf"), - {{name=Tensor, returned=true}, - {name=Tensor}, - {name='charoption', values={'U', 'L'}, default='U'}}, -- uplo - cname("potrf"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}, - {name='charoption', values={'U', 'L'}, default='U'}} - ) - interface:wrap("potrs", - cname("potrs"), - {{name=Tensor, returned=true}, - {name=Tensor}, - {name=Tensor}, - {name='charoption', values={'U', 'L'}, default='U'}}, -- uplo - cname("potrs"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}, - {name=Tensor}, - {name='charoption', values={'U', 'L'}, default='U'}} - ) - interface:wrap("potri", - cname("potri"), - {{name=Tensor, returned=true}, - {name=Tensor}, - {name='charoption', values={'U', 'L'}, default='U'}}, -- uplo - cname("potri"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}, - {name='charoption', values={'U', 'L'}, default='U'}} -- uplo - ) - interface:wrap("pstrf", - cname("pstrf"), - {{name=Tensor, returned=true}, - {name='IntTensor', returned=true}, - {name=Tensor}, - {name='charoption', values={'U', 'L'}, default='U'}, -- uplo - {name=real, default=-1}}, - cname("pstrf"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name='IntTensor', default=true, returned=true, invisible=true}, - {name=Tensor}, - {name='charoption', values={'U', 'L'}, default='U'}, -- uplo - {name=real, default=-1}} - ) - interface:wrap("qr", - cname("qr"), - {{name=Tensor, returned=true}, - {name=Tensor, returned=true}, - {name=Tensor}}, - cname("qr"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}} - ) - interface:wrap("geqrf", - cname("geqrf"), - {{name=Tensor, returned=true}, - {name=Tensor, returned=true}, - {name=Tensor}}, - cname("geqrf"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}} - ) - interface:wrap("orgqr", - cname("orgqr"), - {{name=Tensor, returned=true}, - {name=Tensor}, - {name=Tensor}}, - cname("orgqr"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}, - {name=Tensor}} - ) - interface:wrap("ormqr", - cname("ormqr"), - {{name=Tensor, returned=true}, - {name=Tensor}, - {name=Tensor}, - {name=Tensor}, - {name='charoption', values={'L', 'R'}, default='L'}, - {name='charoption', values={'N', 'T'}, default='N'}}, - cname("ormqr"), - {{name=Tensor, default=true, returned=true, invisible=true}, - {name=Tensor}, - {name=Tensor}, - {name=Tensor}, - 
{name='charoption', values={'L', 'R'}, default='L'}, - {name='charoption', values={'N', 'T'}, default='N'}} - ) - end - - method:register(string.format("m_torch_%sMath__", Tensor)) - interface:print(method:tostring()) - method:clearhistory() - interface:register(string.format("torch_%sMath__", Tensor)) - - interface:print(string.gsub([[ -static void torch_TensorMath_init(lua_State *L) -{ - luaT_pushmetatable(L, "torch.Tensor"); - - /* register methods */ - luaT_setfuncs(L, m_torch_TensorMath__, 0); - - /* register functions into the "torch" field of the tensor metaclass */ - lua_pushstring(L, "torch"); - lua_newtable(L); - luaT_setfuncs(L, torch_TensorMath__, 0); - lua_rawset(L, -3); - lua_pop(L, 1); -} -]], 'Tensor', Tensor)) -end - -interface:dispatchregister("torch_TensorMath__") - -interface:print([[ -void torch_TensorMath_init(lua_State *L) -{ - torch_ByteTensorMath_init(L); - torch_CharTensorMath_init(L); - torch_ShortTensorMath_init(L); - torch_IntTensorMath_init(L); - torch_LongTensorMath_init(L); - torch_FloatTensorMath_init(L); - torch_DoubleTensorMath_init(L); - luaT_setfuncs(L, torch_TensorMath__, 0); -} -]]) - -if arg[1] then - interface:tofile(arg[1]) -else - print(interface:tostring()) -end diff --git a/contrib/lua-torch/torch7/TensorOperator.c b/contrib/lua-torch/torch7/TensorOperator.c deleted file mode 100644 index 8986ff7611..0000000000 --- a/contrib/lua-torch/torch7/TensorOperator.c +++ /dev/null @@ -1,8 +0,0 @@ -#include "general.h" - -#define torch_TensorOperator_(NAME) TH_CONCAT_4(torch_,Real,TensorOperator_,NAME) -#define torch_Tensor_id TH_CONCAT_3(torch_,Real,Tensor_id) -#define torch_Tensor TH_CONCAT_STRING_3(torch.,Real,Tensor) - -#include "generic/TensorOperator.c" -#include "THGenerateAllTypes.h" diff --git a/contrib/lua-torch/torch7/TestSuite.lua b/contrib/lua-torch/torch7/TestSuite.lua deleted file mode 100644 index 630c2c9480..0000000000 --- a/contrib/lua-torch/torch7/TestSuite.lua +++ /dev/null @@ -1,30 +0,0 @@ -function torch.TestSuite() - local obj = { - __tests = {}, - __isTestSuite = true - } - - local metatable = {} - - function metatable:__index(key) - return self.__tests[key] - end - - function metatable:__newindex(key, value) - if self.__tests[key] ~= nil then - error("Test " .. tostring(key) .. " is already defined.") - end - if type(value) ~= "function" then - if type(value) == "table" then - error("Nested tables of tests are not supported") - else - error("Only functions are supported as members of a TestSuite") - end - end - self.__tests[key] = value - end - - setmetatable(obj, metatable) - - return obj -end diff --git a/contrib/lua-torch/torch7/Tester.lua b/contrib/lua-torch/torch7/Tester.lua deleted file mode 100644 index 6509413c25..0000000000 --- a/contrib/lua-torch/torch7/Tester.lua +++ /dev/null @@ -1,879 +0,0 @@ - --- Lua 5.2 compatibility -local unpack = unpack or table.unpack - -local check = {} -- helper functions, defined at the bottom of the file - -local Tester = torch.class('torch.Tester') - -function Tester:__init() - self.errors = {} - self.tests = {} - self.warnings = {} - self._warningCount = {} - self.disabledTests = {} - self._currentTestName = '' - - -- To maintain backwards compatibility (at least for a short while), - -- disable exact dimension checking of tensors when :assertTensorEq is - -- called. Thus {{1}} == {1} when this flag is true. 
- -- - -- Note that other methods that suppose tensor checking (such as - -- :assertGeneralEq) ignore this flag, since previously they didn't - -- exist or support tensor equality checks at all, so there is no - -- old code that uses these functions and relies on the behaviour. - -- - -- Note also that if the dimension check fails with this flag is true, then - -- will show a warning. - self._assertTensorEqIgnoresDims = true -end - -function Tester:setEarlyAbort(earlyAbort) - self.earlyAbort = earlyAbort -end - -function Tester:setRethrowErrors(rethrow) - self.rethrow = rethrow -end - -function Tester:setSummaryOnly(summaryOnly) - self.summaryOnly = summaryOnly -end - --- Add a success to the test. -function Tester:_success() - local name = self._currentTestName - self.assertionPass[name] = self.assertionPass[name] + 1 - return true -end - -function Tester:_addDebugInfo(message) - local ss = debug.traceback('tester', 3) or '' - ss = ss:match('.-\n([^\n]+\n[^\n]+)\n[^\n]+xpcall') or '' - local name = self._currentTestName - return (name ~= '' and name .. '\n' or '') .. message .. '\n' .. ss -end - --- Add a failure to the test. -function Tester:_failure(message) - if self.rethrow then error(message, 2) end - local name = self._currentTestName - self.assertionFail[name] = self.assertionFail[name] + 1 - self.errors[#self.errors + 1] = self:_addDebugInfo(message) - return false -end - --- Add a warning to the test -function Tester:_warning(message) - local name = self._currentTestName - self._warningCount[name] = (self._warningCount[name] or 0) + 1 - self.warnings[#self.warnings + 1] = self:_addDebugInfo(message) -end - --- Call this during a test run with `condition = true` to log a success, or with --- `condition = false` to log a failure (using `message`). -function Tester:_assert_sub(condition, message) - if condition then - return self:_success() - else - return self:_failure(message) - end -end - -local function getMessage(message, ...) - assert(next{...} == nil, "Unexpected arguments passed to test function") - if message then - assert(type(message) == 'string', 'message parameter must be a string') - if message ~= '' then - return message .. '\n' - end - end - return '' -end - ---[[ Historically, some test functions have accepted both a message and a -tolerance, and some just a message (e.g., assertTableEq). Now assertTableEq -accepts both a tolerance and a message, so allow the two arguments to be passed -in either order to maintain backwards compatibility (and more generally, -for convenience). (We still document the ordering as "tolerance, message" for -clarity.) This function also sanitizes them (ensures they are non-nil, etc). -]] -local function getToleranceAndMessage(defaultTolerance, ...) - local args = {...} - local message = nil - local tolerance = nil - for _, a in ipairs(args) do - if type(a) == 'string' then - if message then - error("Unexpected string argument; already have message", a) - end - message = a .. '\n' - elseif type(a) == 'number' then - if tolerance then - error("Unexpected number argument; already have tolerance", a) - end - tolerance = a - assert(tolerance >= 0, "tolerance cannot be negative") - else - error("Unrecognized argument; should be a tolerance or message", a) - end - end - message = message or '' - tolerance = tolerance or defaultTolerance - return tolerance, message -end - -function Tester:assert(condition, ...) - local message = getMessage(...) 
- if type(condition) ~= 'boolean' then - self:_warning(" :assert should only be used for boolean conditions. " - .. "To check for non-nil variables, do this explicitly: " - .. "Tester:assert(var ~= nil).") - end - return self:_assert_sub(condition, - string.format('%sBOOL violation condition=%s', - message, tostring(condition))) -end - -function Tester:assertGeneralEq(got, expected, ...) - return self:_eqOrNeq(got, expected, false, ...) -end - -function Tester:eq(got, expected, ...) - return self:assertGeneralEq(got, expected, ...) -end - -function Tester:assertGeneralNe(got, unexpected, ...) - return self:_eqOrNeq(got, unexpected, true, ...) -end - -function Tester:ne(got, unexpected, ...) - return self:assertGeneralNe(got, unexpected, ...) -end - -function Tester:_eqOrNeq(got, expected, negate, ...) - local tolerance, message = getToleranceAndMessage(0, ...) - local success, subMessage = check.areEq(got, expected, tolerance, negate) - subMessage = subMessage or '' - return self:_assert_sub(success, message .. subMessage) -end - -function Tester:assertlt(a, b, ...) - local message = getMessage(...) - return self:_assert_sub(a < b, - string.format('%sLT failed: %s >= %s', - message, tostring(a), tostring(b))) -end - -function Tester:assertgt(a, b, ...) - local message = getMessage(...) - return self:_assert_sub(a > b, - string.format('%sGT failed: %s <= %s', - message, tostring(a), tostring(b))) -end - -function Tester:assertle(a, b, ...) - local message = getMessage(...) - return self:_assert_sub(a <= b, - string.format('%sLE failed: %s > %s', - message, tostring(a), tostring(b))) -end - -function Tester:assertge(a, b, ...) - local message = getMessage(...) - return self:_assert_sub(a >= b, - string.format('%sGE failed: %s < %s', - message, tostring(a), tostring(b))) -end - -function Tester:assertalmosteq(a, b, ...) - local tolerance, message = getToleranceAndMessage(1e-16, ...) - local diff = math.abs(a - b) - return self:_assert_sub( - diff <= tolerance, - string.format( - '%sALMOST_EQ failed: %s ~= %s with tolerance=%s', - message, tostring(a), tostring(b), tostring(tolerance))) -end - -function Tester:asserteq(a, b, ...) - local message = getMessage(...) - return self:_assert_sub(a == b, - string.format('%sEQ failed: %s ~= %s', - message, tostring(a), tostring(b))) -end - -function Tester:assertne(a, b, ...) - local message = getMessage(...) - if type(a) == type(b) and type(a) == 'table' or type(a) == 'userdata' then - self:_warning(" :assertne should only be used to compare basic lua " - .. "objects (numbers, booleans, etc). Consider using " - .. "either :assertGeneralNe or :assert(a ~= b).") - end - return self:_assert_sub(a ~= b, - string.format('%sNE failed: %s == %s', - message, tostring(a), tostring(b))) -end - -function Tester:assertTensorEq(ta, tb, ...) - return self:_assertTensorEqOrNeq(ta, tb, false, ...) -end - -function Tester:assertTensorNe(ta, tb, ...) - return self:_assertTensorEqOrNeq(ta, tb, true, ...) -end - -function Tester:_assertTensorEqOrNeq(ta, tb, negate, ...) - assert(torch.isTensor(ta), "First argument should be a Tensor") - assert(torch.isTensor(tb), "Second argument should be a Tensor") - - local tolerance, message = getToleranceAndMessage(0, ...) 
- local success, subMessage = - check.areTensorsEq(ta, tb, tolerance, negate, - self._assertTensorEqIgnoresDims) - subMessage = subMessage or '' - - if self._assertTensorEqIgnoresDims and (not negate) and success - and not ta:isSameSizeAs(tb) then - self:_warning("Tensors have the same content but different dimensions. " - .. "For backwards compatibility, they are considered equal, " - .. "but this may change in the future. Consider using :eq " - .. "to check for equality instead.") - end - - return self:_assert_sub(success, message .. subMessage) -end - -function Tester:assertTableEq(ta, tb, ...) - return self:_assertTableEqOrNeq(ta, tb, false, ...) -end - -function Tester:assertTableNe(ta, tb, ...) - return self:_assertTableEqOrNeq(ta, tb, true, ...) -end - -function Tester:_assertTableEqOrNeq(ta, tb, negate, ...) - assert(type(ta) == 'table', "First argument should be a Table") - assert(type(tb) == 'table', "Second argument should be a Table") - return self:_eqOrNeq(ta, tb, negate, ...) -end - -function Tester:assertError(f, ...) - return self:assertErrorObj(f, function() return true end, ...) -end - -function Tester:assertNoError(f, ...) - local message = getMessage(...) - local status, err = pcall(f) - return self:_assert_sub(status, - string.format('%sERROR violation: err=%s', message, - tostring(err))) -end - -function Tester:assertErrorMsg(f, errmsg, ...) - return self:assertErrorObj(f, function(err) return err == errmsg end, ...) -end - -function Tester:assertErrorPattern(f, errPattern, ...) - local function errcomp(err) - return string.find(err, errPattern) ~= nil - end - return self:assertErrorObj(f, errcomp, ...) -end - -function Tester:assertErrorObj(f, errcomp, ...) - local message = getMessage(...) - local status, err = pcall(f) - return self:_assert_sub((not status) and errcomp(err), - string.format('%sERROR violation: err=%s', message, - tostring(err))) -end - -function Tester:add(f, name) - if type(f) == "table" then - assert(name == nil, "Name parameter is forbidden for a table of tests, " - .. "since its use is ambiguous") - if f.__isTestSuite then - f = f.__tests - else - self:_warning("Should use TestSuite rather than plain lua table") - end - for i, v in pairs(f) do - -- We forbid nested tests because the "expected" behaviour when a named - -- test is run in the case that the named test is in fact a table of - -- tests is not supported. Similar issue with _setUp and _tearDown - -- functions inside nested tests. - assert(type(v) ~= 'table', "Nested sets of tests are not supported") - self:add(v, i) - end - return self - end - - assert(type(f) == 'function', - "Only tables of functions and functions supported") - - if name == '_setUp' then - assert(not self._setUp, "Only one set-up function allowed") - self._setUp = f - elseif name == '_tearDown' then - assert(not self._tearDown, "Only one tear-down function allowed") - self._tearDown = f - else - name = name or 'unknown' - if self.tests[name] ~= nil then - error('Test with name ' .. name .. ' already exists!') - end - self.tests[name] = f - end - return self -end - -function Tester:disable(testNames) - if type(testNames) == 'string' then - testNames = {testNames} - end - assert(type(testNames) == 'table', "Expecting name or list for disable") - for _, name in ipairs(testNames) do - assert(self.tests[name], "Unrecognized test '" .. name .. 
"'") - self.disabledTests[name] = true - end - return self -end - -function Tester:run(testNames) - local tests = self:_getTests(testNames) - self.assertionPass = {} - self.assertionFail = {} - self.haveWarning = {} - self.testError = {} - for name in pairs(tests) do - self.assertionPass[name] = 0 - self.assertionFail[name] = 0 - self.testError[name] = 0 - self._warningCount[name] = 0 - end - self:_run(tests) - self:_report(tests) - - -- Throws an error on test failure/error, so that test script returns - -- with nonzero return value. - for name in pairs(tests) do - assert(self.assertionFail[name] == 0, - 'An error was found while running tests!') - assert(self.testError[name] == 0, - 'An error was found while running tests!') - end - - return 0 -end - -local function pluralize(num, str) - local stem = num .. ' ' .. str - if num == 1 then - return stem - else - return stem .. 's' - end -end - -local NCOLS = 80 -local coloured -local enable_colors, c = pcall(require, 'sys.colors') -if arg and enable_colors then -- have we been invoked from the commandline? - coloured = function(str, colour) - return colour .. str .. c.none - end -else - c = {} - coloured = function(str) - return str - end -end - -function Tester:_run(tests) - local ntests = 0 - for _ in pairs(tests) do - ntests = ntests + 1 - end - - local ntestsAsString = string.format('%u', ntests) - local cfmt = string.format('%%%uu/%u ', ntestsAsString:len(), ntestsAsString) - local cfmtlen = ntestsAsString:len() * 2 + 2 - - local function bracket(str) - return '[' .. str .. ']' - end - - io.write('Running ' .. pluralize(ntests, 'test') .. '\n') - local i = 1 - for name, fn in pairs(tests) do - self._currentTestName = name - - -- TODO: compute max length of name and cut it down to size if needed - local strinit = coloured(string.format(cfmt, i), c.cyan) - .. self._currentTestName .. ' ' - .. string.rep('.', - NCOLS - 6 - 2 - - cfmtlen - self._currentTestName:len()) - .. ' ' - io.write(strinit .. bracket(coloured('WAIT', c.cyan))) - io.flush() - - local status, message, pass, skip - if self.disabledTests[name] then - skip = true - else - skip = false - if self._setUp then - self._setUp(name) - end - if self.rethrow then - status = true - local nerr = #self.errors - message = fn() - pass = nerr == #self.errors - else - status, message, pass = self:_pcall(fn) - end - if self._tearDown then - self._tearDown(name) - end - end - - io.write('\r') - io.write(strinit) - - if skip then - io.write(bracket(coloured('SKIP', c.yellow))) - elseif not status then - self.testError[name] = 1 - io.write(bracket(coloured('ERROR', c.magenta))) - elseif not pass then - io.write(bracket(coloured('FAIL', c.red))) - else - io.write(bracket(coloured('PASS', c.green))) - if self._warningCount[name] > 0 then - io.write('\n' .. string.rep(' ', NCOLS - 10)) - io.write(bracket(coloured('+warning', c.yellow))) - end - end - io.write('\n') - io.flush() - - if self.earlyAbort and (i < ntests) and (not status or not pass) - and (not skip) then - io.write('Aborting on first error, not all tests have been executed\n') - break - end - - i = i + 1 - - collectgarbage() - end -end - -function Tester:_pcall(f) - local nerr = #self.errors - local stat, result = xpcall(f, debug.traceback) - if not stat then - self.errors[#self.errors + 1] = - self._currentTestName .. '\n Function call failed\n' .. result .. 
'\n' - end - return stat, result, stat and (nerr == #self.errors) -end - -function Tester:_getTests(testNames) - if testNames == nil then - return self.tests - end - if type(testNames) == 'string' then - testNames = {testNames} - end - assert(type(testNames) == 'table', - "Only accept a name or table of test names (or nil for all tests)") - - local function getMatchingNames(pattern) - local matchingNames = {} - for name in pairs(self.tests) do - if string.match(name, pattern) then - table.insert(matchingNames, name) - end - end - return matchingNames - end - - local tests = {} - for _, pattern in ipairs(testNames) do - local matchingNames = getMatchingNames(pattern) - assert(#matchingNames > 0, "Couldn't find test '" .. pattern .. "'") - for _, name in ipairs(matchingNames) do - tests[name] = self.tests[name] - end - end - return tests -end - -function Tester:_report(tests) - local ntests = 0 - local nfailures = 0 - local nerrors = 0 - local nskipped = 0 - local nwarnings = 0 - self.countasserts = 0 - for name in pairs(tests) do - ntests = ntests + 1 - self.countasserts = self.countasserts + self.assertionFail[name] - + self.assertionPass[name] - if self.assertionFail[name] > 0 then - nfailures = nfailures + 1 - end - if self.testError[name] > 0 then - nerrors = nerrors + 1 - end - if self._warningCount[name] > 0 then - nwarnings = nwarnings + 1 - end - if self.disabledTests[name] then - nskipped = nskipped + 1 - end - end - if self._warningCount[''] then - nwarnings = nwarnings + self._warningCount[''] - end - - io.write('Completed ' .. pluralize(self.countasserts, 'assert')) - io.write(' in ' .. pluralize(ntests, 'test') .. ' with ') - io.write(coloured(pluralize(nfailures, 'failure'), - nfailures == 0 and c.green or c.red)) - io.write(' and ') - io.write(coloured(pluralize(nerrors, 'error'), - nerrors == 0 and c.green or c.magenta)) - if nwarnings > 0 then - io.write(' and ') - io.write(coloured(pluralize(nwarnings, 'warning'), c.yellow)) - end - if nskipped > 0 then - io.write(' and ') - io.write(coloured(nskipped .. ' disabled', c.yellow)) - end - io.write('\n') - - -- Prints off a message separated by ----- - local haveSection = false - local function addSection(text) - local function printDashes() - io.write(string.rep('-', NCOLS) .. '\n') - end - if not haveSection then - printDashes() - haveSection = true - end - io.write(text .. '\n') - printDashes() - end - - if not self.summaryOnly then - for _, v in ipairs(self.errors) do - addSection(v) - end - for _, v in ipairs(self.warnings) do - addSection(v) - end - end -end - - ---[[ Tests for tensor equality between two tensors of matching sizes and types. - -Tests whether the maximum element-wise difference between `ta` and `tb` is less -than or equal to `tolerance`. - -Arguments: -* `ta` (tensor) -* `tb` (tensor) -* `tolerance` (number) maximum elementwise difference between `ta` and `tb`. -* `negate` (boolean) if true, we invert success and failure. -* `storage` (boolean) if true, we print an error message referring to Storages - rather than Tensors. - -Returns: -1. success, boolean that indicates success -2. 
failure_message, string or nil -]] -function check.areSameFormatTensorsEq(ta, tb, tolerance, negate, storage) - local function ensureHasAbs(t) - -- Byte, Char and Short Tensors don't have abs - return t.abs and t or t:double() - end - - ta = ensureHasAbs(ta) - tb = ensureHasAbs(tb) - - local diff = ta:clone():add(-1, tb):abs() - local err = diff:max() - local success = err <= tolerance - if negate then - success = not success - end - - local errMessage - if not success then - local prefix = storage and 'Storage' or 'Tensor' - local violation = negate and 'NE(==)' or 'EQ(==)' - errMessage = string.format('%s%s violation: max diff=%s, tolerance=%s', - prefix, - violation, - tostring(err), - tostring(tolerance)) - end - - return success, errMessage -end - ---[[ Tests for tensor equality. - -Tests whether the maximum element-wise difference between `ta` and `tb` is less -than or equal to `tolerance`. - -Arguments: -* `ta` (tensor) -* `tb` (tensor) -* `tolerance` (number) maximum elementwise difference between `ta` and `tb`. -* `negate` (boolean) if negate is true, we invert success and failure. -* `ignoreTensorDims` (boolean, default false) if true, then tensors of the same - size but different dimensions can still be considered equal, e.g., - {{1}} == {1}. For backwards compatibility. - -Returns: -1. success, boolean that indicates success -2. failure_message, string or nil -]] -function check.areTensorsEq(ta, tb, tolerance, negate, ignoreTensorDims) - ignoreTensorDims = ignoreTensorDims or false - - if not ignoreTensorDims and ta:dim() ~= tb:dim() then - return negate, 'The tensors have different dimensions' - end - - if ta:type() ~= tb:type() then - return negate, 'The tensors have different types' - end - - -- If we are comparing two empty tensors, return true. - -- This is needed because some functions below cannot be applied to tensors - -- of dimension 0. - if ta:dim() == 0 and tb:dim() == 0 then - return not negate, 'Both tensors are empty' - end - - local sameSize - if ignoreTensorDims then - sameSize = ta:nElement() == tb:nElement() - else - sameSize = ta:isSameSizeAs(tb) - end - if not sameSize then - return negate, 'The tensors have different sizes' - end - - return check.areSameFormatTensorsEq(ta, tb, tolerance, negate, false) -end - -local typesMatching = { - ['torch.ByteStorage'] = torch.ByteTensor, - ['torch.CharStorage'] = torch.CharTensor, - ['torch.ShortStorage'] = torch.ShortTensor, - ['torch.IntStorage'] = torch.IntTensor, - ['torch.LongStorage'] = torch.LongTensor, - ['torch.FloatStorage'] = torch.FloatTensor, - ['torch.DoubleStorage'] = torch.DoubleTensor, - ['torch.HalfStorage'] = torch.HalfTensor, -} - ---[[ Tests for storage equality. - -Tests whether the maximum element-wise difference between `sa` and `sb` is less -than or equal to `tolerance`. - -Arguments: -* `sa` (storage) -* `sb` (storage) -* `tolerance` (number) maximum elementwise difference between `a` and `b`. -* `negate` (boolean) if negate is true, we invert success and failure. - -Returns: -1. success, boolean that indicates success -2. 
failure_message, string or nil -]] -function check.areStoragesEq(sa, sb, tolerance, negate) - if sa:size() ~= sb:size() then - return negate, 'The storages have different sizes' - end - - local typeOfsa = torch.type(sa) - local typeOfsb = torch.type(sb) - - if typeOfsa ~= typeOfsb then - return negate, 'The storages have different types' - end - - local ta = typesMatching[typeOfsa](sa) - local tb = typesMatching[typeOfsb](sb) - - return check.areSameFormatTensorsEq(ta, tb, tolerance, negate, true) -end - ---[[ Tests for general (deep) equality. - -The types of `got` and `expected` must match. -Tables are compared recursively. Keys and types of the associated values must -match, recursively. Numbers are compared with the given tolerance. -Torch tensors and storages are compared with the given tolerance on their -elementwise difference. Other types are compared for strict equality with the -regular Lua == operator. - -Arguments: -* `got` -* `expected` -* `tolerance` (number) maximum elementwise difference between `a` and `b`. -* `negate` (boolean) if negate is true, we invert success and failure. - -Returns: -1. success, boolean that indicates success -2. failure_message, string or nil -]] -function check.areEq(got, expected, tolerance, negate) - local errMessage - if type(got) ~= type(expected) then - if not negate then - errMessage = 'EQ failed: values have different types (first: ' - .. type(got) .. ', second: ' .. type(expected) .. ')' - end - return negate, errMessage - elseif type(got) == 'number' then - local diff = math.abs(got - expected) - local ok = (diff <= tolerance) - if negate then - ok = not ok - end - if not ok then - if negate then - errMessage = string.format("NE failed: %s == %s", - tostring(got), tostring(expected)) - else - errMessage = string.format("EQ failed: %s ~= %s", - tostring(got), tostring(expected)) - end - if tolerance > 0 then - errMessage = errMessage .. " with tolerance=" .. tostring(tolerance) - end - end - return ok, errMessage - elseif type(expected) == "table" then - return check.areTablesEq(got, expected, tolerance, negate) - elseif torch.isTensor(got) then - return check.areTensorsEq(got, expected, tolerance, negate) - elseif torch.isStorage(got) then - return check.areStoragesEq(got, expected, tolerance, negate) - else - -- Below: we have the same type which is either userdata or a lua type - -- which is not a number. - local ok = (got == expected) - if negate then - ok = not ok - end - if not ok then - if negate then - errMessage = string.format("NE failed: %s (%s) == %s (%s)", - tostring(got), type(got), - tostring(expected), type(expected)) - else - errMessage = string.format("EQ failed: %s (%s) ~= %s (%s)", - tostring(got), type(got), - tostring(expected), type(expected)) - end - end - return ok, errMessage - end -end - ---[[ Tests for (deep) table equality. - -Tables are compared recursively. Keys and types of the associated values must -match, recursively. Numbers are compared with the given tolerance. -Torch tensors and storages are compared with the given tolerance on their -elementwise difference. Other types are compared for strict equality with the -regular Lua == operator. - -Arguments: -* `t1` (table) -* `t2` (table) -* `tolerance` (number) maximum elementwise difference between `a` and `b`. -* `negate` (boolean) if negate is true, we invert success and failure. - -Returns: -1. success, boolean that indicates success -2. 
failure_message, string or nil
-]]
-function check.areTablesEq(t1, t2, tolerance, negate)
- -- Implementation detail: Instead of doing a depth-first table comparison
- -- check (for example, using recursion), let's do a breadth-first search
- -- using a queue. Why? Because if we have two tables that are quite deep
- -- (e.g., a gModule from nngraph), then if they are different then it's
- -- more useful to the user to show how they differ at as-shallow-a-depth
- -- as possible.
- local queue = {}
- queue._head = 1
- queue._tail = 1
- function queue.isEmpty()
- return queue._tail == queue._head
- end
- function queue.pop()
- queue._head = queue._head + 1
- return queue[queue._head - 1]
- end
- function queue.push(value)
- queue[queue._tail] = value
- queue._tail = queue._tail + 1
- end
-
- queue.push({t1, t2})
- while not queue.isEmpty() do
- local location
- t1, t2, location = unpack(queue.pop())
-
- local function toSublocation(key)
- local keyAsString = tostring(key)
- return (location and location .. "." .. keyAsString) or keyAsString
- end
-
- for key, value1 in pairs(t1) do
- local sublocation = toSublocation(key)
- if t2[key] == nil then
- return negate, string.format(
- "Entry %s missing in second table (is %s in first)",
- sublocation, tostring(value1))
- end
- local value2 = t2[key]
- if type(value1) == 'table' and type(value2) == 'table' then
- queue.push({value1, value2, sublocation})
- else
- local ok, message = check.areEq(value1, value2, tolerance, false)
- if not ok then
- message = 'At table location ' .. sublocation .. ': ' .. message
- return negate, message
- end
- end
- end
-
- for key, value2 in pairs(t2) do
- local sublocation = toSublocation(key)
- if t1[key] == nil then
- return negate, string.format(
- "Entry %s missing in first table (is %s in second)",
- sublocation, tostring(value2))
- end
- end
- end
- return not negate, 'The tables are equal'
-end
diff --git a/contrib/lua-torch/torch7/Timer.c b/contrib/lua-torch/torch7/Timer.c
deleted file mode 100644
index 13865b5908..0000000000
--- a/contrib/lua-torch/torch7/Timer.c
+++ /dev/null
@@ -1,185 +0,0 @@
-#include "general.h"
-
-#ifdef _WIN32
-
-#include <assert.h>
-#include <windows.h>
-#define TimeType __int64
-static __declspec( thread ) TimeType ticksPerSecond = 0;
-
-/*
- * There is an example of getrusage for windows in the following link:
- * https://github.com/openvswitch/ovs/blob/master/lib/getrusage-windows.c
- */
-
-#else
-
-#include <sys/time.h>
-#include <sys/resource.h>
-#define TimeType double
-
-#endif
-
-typedef struct _Timer
-{
- int isRunning;
-
- TimeType totalrealtime;
- TimeType totalusertime;
- TimeType totalsystime;
-
- TimeType startrealtime;
- TimeType startusertime;
- TimeType startsystime;
-} Timer;
-
-static TimeType torch_Timer_realtime()
-{
-#ifdef _WIN32
- TimeType current;
- QueryPerformanceCounter(&current);
- return current;
-#else
- struct timeval current;
- gettimeofday(&current, NULL);
- return (current.tv_sec + current.tv_usec/1000000.0);
-#endif
-}
-
-static TimeType torch_Timer_usertime()
-{
-#ifdef _WIN32
- return torch_Timer_realtime();
-#else
- struct rusage current;
- getrusage(RUSAGE_SELF, &current);
- return (current.ru_utime.tv_sec + current.ru_utime.tv_usec/1000000.0);
-#endif
-}
-
-static TimeType torch_Timer_systime()
-{
-#ifdef _WIN32
- return 0;
-#else
- struct rusage current;
- getrusage(RUSAGE_SELF, &current);
- return (current.ru_stime.tv_sec + current.ru_stime.tv_usec/1000000.0);
-#endif
-}
-
-static int torch_Timer_new(lua_State *L)
-{
-#ifdef _WIN32
- if (ticksPerSecond == 0)
- {
- assert(sizeof(LARGE_INTEGER) == sizeof(__int64));
-
QueryPerformanceFrequency(&ticksPerSecond); - } -#endif - Timer *timer = luaT_alloc(L, sizeof(Timer)); - timer->isRunning = 1; - timer->totalrealtime = 0; - timer->totalusertime = 0; - timer->totalsystime = 0; - timer->startrealtime = torch_Timer_realtime(); - timer->startusertime = torch_Timer_usertime(); - timer->startsystime = torch_Timer_systime(); - luaT_pushudata(L, timer, "torch.Timer"); - return 1; -} - -static int torch_Timer_reset(lua_State *L) -{ - Timer *timer = luaT_checkudata(L, 1, "torch.Timer"); - timer->totalrealtime = 0; - timer->totalusertime = 0; - timer->totalsystime = 0; - timer->startrealtime = torch_Timer_realtime(); - timer->startusertime = torch_Timer_usertime(); - timer->startsystime = torch_Timer_systime(); - lua_settop(L, 1); - return 1; -} - -static int torch_Timer_free(lua_State *L) -{ - Timer *timer = luaT_checkudata(L, 1, "torch.Timer"); - luaT_free(L, timer); - return 0; -} - -static int torch_Timer_stop(lua_State *L) -{ - Timer *timer = luaT_checkudata(L, 1, "torch.Timer"); - if(timer->isRunning) - { - TimeType realtime = torch_Timer_realtime() - timer->startrealtime; - TimeType usertime = torch_Timer_usertime() - timer->startusertime; - TimeType systime = torch_Timer_systime() - timer->startsystime; - timer->totalrealtime += realtime; - timer->totalusertime += usertime; - timer->totalsystime += systime; - timer->isRunning = 0; - } - lua_settop(L, 1); - return 1; -} - -static int torch_Timer_resume(lua_State *L) -{ - Timer *timer = luaT_checkudata(L, 1, "torch.Timer"); - if(!timer->isRunning) - { - timer->isRunning = 1; - timer->startrealtime = torch_Timer_realtime(); - timer->startusertime = torch_Timer_usertime(); - timer->startsystime = torch_Timer_systime(); - } - lua_settop(L, 1); - return 1; -} - -static int torch_Timer_time(lua_State *L) -{ - Timer *timer = luaT_checkudata(L, 1, "torch.Timer"); - double realtime = (timer->isRunning ? (timer->totalrealtime + torch_Timer_realtime() - timer->startrealtime) : timer->totalrealtime); - double usertime = (timer->isRunning ? (timer->totalusertime + torch_Timer_usertime() - timer->startusertime) : timer->totalusertime); - double systime = (timer->isRunning ? (timer->totalsystime + torch_Timer_systime() - timer->startsystime) : timer->totalsystime); -#ifdef _WIN32 - realtime /= ticksPerSecond; - usertime /= ticksPerSecond; - systime /= ticksPerSecond; -#endif - lua_createtable(L, 0, 3); - lua_pushnumber(L, realtime); - lua_setfield(L, -2, "real"); - lua_pushnumber(L, usertime); - lua_setfield(L, -2, "user"); - lua_pushnumber(L, systime); - lua_setfield(L, -2, "sys"); - return 1; -} - -static int torch_Timer___tostring__(lua_State *L) -{ - Timer *timer = luaT_checkudata(L, 1, "torch.Timer"); - lua_pushfstring(L, "torch.Timer [status: %s]", (timer->isRunning ? 
"running" : "stopped")); - return 1; -} - -static const struct luaL_Reg torch_Timer__ [] = { - {"reset", torch_Timer_reset}, - {"stop", torch_Timer_stop}, - {"resume", torch_Timer_resume}, - {"time", torch_Timer_time}, - {"__tostring__", torch_Timer___tostring__}, - {NULL, NULL} -}; - -void torch_Timer_init(lua_State *L) -{ - luaT_newmetatable(L, "torch.Timer", NULL, torch_Timer_new, torch_Timer_free, NULL); - luaT_setfuncs(L, torch_Timer__, 0); - lua_pop(L, 1); -} diff --git a/contrib/lua-torch/torch7/cmake/TorchConfig.cmake.in b/contrib/lua-torch/torch7/cmake/TorchConfig.cmake.in deleted file mode 100644 index bafa04f911..0000000000 --- a/contrib/lua-torch/torch7/cmake/TorchConfig.cmake.in +++ /dev/null @@ -1,35 +0,0 @@ -# This (ugly) setup assumes: -# CMAKE_PREFIX_PATH = LUA_BINDIR -# CMAKE_INSTALL_PREFIX = PREFIX - -# Define Torch basic subpaths -SET(Torch_INSTALL_PREFIX "@Torch_INSTALL_PREFIX@") - -SET(Torch_INSTALL_BIN_SUBDIR "@Torch_INSTALL_BIN_SUBDIR@") -SET(Torch_INSTALL_MAN_SUBDIR "@Torch_INSTALL_MAN_SUBDIR@") -SET(Torch_INSTALL_LIB_SUBDIR "@Torch_INSTALL_LIB_SUBDIR@") -SET(Torch_INSTALL_SHARE_SUBDIR "@Torch_INSTALL_SHARE_SUBDIR@") -SET(Torch_INSTALL_INCLUDE_SUBDIR "@Torch_INSTALL_INCLUDE_SUBDIR@") -SET(Torch_INSTALL_CMAKE_SUBDIR "@Torch_INSTALL_CMAKE_SUBDIR@") -SET(Torch_INSTALL_LUA_PATH_SUBDIR "@Torch_INSTALL_LUA_PATH_SUBDIR@") -SET(Torch_INSTALL_LUA_CPATH_SUBDIR "@Torch_INSTALL_LUA_CPATH_SUBDIR@") -SET(Torch_INSTALL_CMAKE_RIDBUS "@Torch_INSTALL_CMAKE_RIDBUS@") - -FILE(RELATIVE_PATH Torch_INSTALL_LUA_PATH_SUBDIR "${Torch_INSTALL_PREFIX}" "${CMAKE_INSTALL_PREFIX}/lua") -FILE(RELATIVE_PATH Torch_INSTALL_LUA_CPATH_SUBDIR "${Torch_INSTALL_PREFIX}" "${CMAKE_INSTALL_PREFIX}/lib") - -LIST(APPEND CMAKE_MODULE_PATH "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_CMAKE_SUBDIR}") -SET(CMAKE_INSTALL_PREFIX "${Torch_INSTALL_PREFIX}") # override - -INCLUDE(TorchPathsInit) -INCLUDE(TorchPackage) -INCLUDE(TorchWrap) - -# Define Torch basic targets -INCLUDE(TorchExports) - -INCLUDE_DIRECTORIES("${Torch_INSTALL_INCLUDE}") -INCLUDE_DIRECTORIES("${Torch_INSTALL_INCLUDE}/TH") -LINK_DIRECTORIES("${Torch_INSTALL_LIB}") - -MESSAGE(STATUS "Found Torch7 in ${Torch_INSTALL_PREFIX}") diff --git a/contrib/lua-torch/torch7/cmake/TorchExports.cmake b/contrib/lua-torch/torch7/cmake/TorchExports.cmake deleted file mode 100644 index f47fd6f59d..0000000000 --- a/contrib/lua-torch/torch7/cmake/TorchExports.cmake +++ /dev/null @@ -1,2 +0,0 @@ -CONFIGURE_FILE("cmake/TorchConfig.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/cmake-exports/TorchConfig.cmake" @ONLY) -CONFIGURE_FILE("cmake/TorchWrap.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/cmake-exports/TorchWrap.cmake" @ONLY) diff --git a/contrib/lua-torch/torch7/cmake/TorchPackage.cmake b/contrib/lua-torch/torch7/cmake/TorchPackage.cmake deleted file mode 100644 index 846b263a41..0000000000 --- a/contrib/lua-torch/torch7/cmake/TorchPackage.cmake +++ /dev/null @@ -1,58 +0,0 @@ -# -*- cmake -*- - -MACRO(ADD_TORCH_LIBRARY package type src) - IF ("${type}" STREQUAL "STATIC") - if ("${src}" MATCHES "cu$" OR "${src}" MATCHES "cu;") - CUDA_ADD_LIBRARY(${package} STATIC ${src}) - else() - ADD_LIBRARY(${package} STATIC ${src}) - endif() - ELSE() - if ("${src}" MATCHES "cu$" OR "${src}" MATCHES "cu;") - CUDA_ADD_LIBRARY(${package} ${type} ${src}) - else() - ADD_LIBRARY(${package} ${type} ${src}) - endif() - ENDIF() - INSTALL(TARGETS ${package} DESTINATION ${RSPAMD_LIBDIR}) -ENDMACRO() - -MACRO(ADD_TORCH_PACKAGE package src luasrc) - INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}) - 
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/contrib/lua-torch/torch7/lib/TH)
- INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/contrib/lua-torch/torch7/lib/luaT)
- INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/contrib/lua-torch/torch7/lib/TH)
- INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/contrib/lua-torch/torch7/lib/luaT)
- INCLUDE_DIRECTORIES(${Torch_LUA_INCLUDE_DIR})
-
- ### C/C++ sources
- # As per the CMake docs, macro arguments are not variables, so the simple test syntax does not work
- IF(NOT "${src}" STREQUAL "")
-
- ADD_TORCH_LIBRARY(${package} SHARED "${src}")
-
- ### Torch packages assume the library prefix is "lib"
- SET_TARGET_PROPERTIES(${package} PROPERTIES
- PREFIX "lib"
- IMPORT_PREFIX "lib")
-
- IF(APPLE)
- SET_TARGET_PROPERTIES(${package} PROPERTIES
- LINK_FLAGS "-undefined dynamic_lookup")
- ENDIF()
-
- SET_TARGET_PROPERTIES(${package} PROPERTIES
- COMPILE_FLAGS "-fPIC")
- SET_TARGET_PROPERTIES(${package} PROPERTIES
- PREFIX "lib" IMPORT_PREFIX "lib" OUTPUT_NAME "${package}")
- INSTALL(TARGETS ${package} DESTINATION ${RSPAMD_LIBDIR})
-
- ENDIF(NOT "${src}" STREQUAL "")
-
- ### lua sources
- IF(NOT "${luasrc}" STREQUAL "")
- INSTALL(FILES ${luasrc}
- DESTINATION ${LUALIBDIR}/${package})
- ENDIF(NOT "${luasrc}" STREQUAL "")
-
-ENDMACRO(ADD_TORCH_PACKAGE)
diff --git a/contrib/lua-torch/torch7/cmake/TorchPaths.cmake b/contrib/lua-torch/torch7/cmake/TorchPaths.cmake
deleted file mode 100644
index 70c7a4fb83..0000000000
--- a/contrib/lua-torch/torch7/cmake/TorchPaths.cmake
+++ /dev/null
@@ -1,32 +0,0 @@
-# work around another annoying cmake bug
-# http://public.kitware.com/Bug/view.php?id=14462
-# https://awesome.naquadah.org/bugs/index.php?do=details&task_id=869
-MACRO(NORMALIZE_PATH _path_)
- get_filename_component(${_path_}_abs "${${_path_}}" ABSOLUTE)
- SET(${_path_} "${${_path_}_abs}")
-ENDMACRO()
-
-NORMALIZE_PATH(LUA_BINDIR)
-NORMALIZE_PATH(LUA_LIBDIR)
-NORMALIZE_PATH(LUA_INCDIR)
-NORMALIZE_PATH(LUADIR)
-NORMALIZE_PATH(LIBDIR)
-
-GET_FILENAME_COMPONENT(CMAKE_INSTALL_PREFIX "${LUALIBDIR}" PATH)
-
-SET(Torch_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX})
-SET(Torch_INSTALL_LIB_SUBDIR ${LUALIBDIR})
-FILE(RELATIVE_PATH Torch_INSTALL_BIN_SUBDIR "${CMAKE_INSTALL_PREFIX}" "${LUA_BINDIR}")
-FILE(RELATIVE_PATH Torch_INSTALL_INCLUDE_SUBDIR "${CMAKE_INSTALL_PREFIX}" "${LUA_INCDIR}")
-
-SET(Torch_INSTALL_MAN_SUBDIR "share/man" CACHE PATH
- "Install dir for man pages (relative to Torch_INSTALL_PREFIX)")
-
-SET(Torch_INSTALL_SHARE_SUBDIR "share" CACHE PATH
- "Install dir for data (relative to Torch_INSTALL_PREFIX)")
-
-SET(Torch_INSTALL_CMAKE_SUBDIR "share/cmake/torch" CACHE PATH
- "Install dir for .cmake files (relative to Torch_INSTALL_PREFIX)")
-
-FILE(RELATIVE_PATH Torch_INSTALL_LUA_PATH_SUBDIR "${CMAKE_INSTALL_PREFIX}" "${LUADIR}")
-FILE(RELATIVE_PATH Torch_INSTALL_LUA_CPATH_SUBDIR "${CMAKE_INSTALL_PREFIX}" "${LIBDIR}")
diff --git a/contrib/lua-torch/torch7/cmake/TorchPathsInit.cmake b/contrib/lua-torch/torch7/cmake/TorchPathsInit.cmake
deleted file mode 100644
index 42f8ffb3f0..0000000000
--- a/contrib/lua-torch/torch7/cmake/TorchPathsInit.cmake
+++ /dev/null
@@ -1,23 +0,0 @@
-SET(Torch_INSTALL_BIN "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_BIN_SUBDIR}")
-SET(Torch_INSTALL_MAN "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_MAN_SUBDIR}")
-SET(Torch_INSTALL_LIB "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_LIB_SUBDIR}")
-SET(Torch_INSTALL_SHARE "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_SHARE_SUBDIR}")
-SET(Torch_INSTALL_INCLUDE "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_INCLUDE_SUBDIR}")
-#SET(Torch_INSTALL_DOK
"${Torch_INSTALL_PREFIX}/${Torch_INSTALL_DOK_SUBDIR}") -#SET(Torch_INSTALL_HTML "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_HTML_SUBDIR}") -SET(Torch_INSTALL_CMAKE "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_CMAKE_SUBDIR}") -SET(Torch_INSTALL_LUA_PATH "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_LUA_PATH_SUBDIR}") -#SET(Torch_INSTALL_LUA_PKG_PATH "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_LUA_PKG_PATH_SUBDIR}") -SET(Torch_INSTALL_LUA_CPATH "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_LUA_CPATH_SUBDIR}") -#SET(Torch_INSTALL_LUAROCKS_SYSCONF "${Torch_INSTALL_PREFIX}/${Torch_INSTALL_LUAROCKS_SYSCONF_SUBDIR}") - -# reverse relative path to prefix (ridbus is the palindrom of subdir) -FILE(RELATIVE_PATH Torch_INSTALL_BIN_RIDBUS "${Torch_INSTALL_BIN}" "${Torch_INSTALL_PREFIX}/.") -FILE(RELATIVE_PATH Torch_INSTALL_CMAKE_RIDBUS "${Torch_INSTALL_CMAKE}" "${Torch_INSTALL_PREFIX}/.") -GET_FILENAME_COMPONENT(Torch_INSTALL_BIN_RIDBUS "${Torch_INSTALL_BIN_RIDBUS}" PATH) -GET_FILENAME_COMPONENT(Torch_INSTALL_CMAKE_RIDBUS "${Torch_INSTALL_CMAKE_RIDBUS}" PATH) - -IF (WIN32) - SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}") - SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}") -ENDIF (WIN32) diff --git a/contrib/lua-torch/torch7/cmake/TorchWrap.cmake b/contrib/lua-torch/torch7/cmake/TorchWrap.cmake deleted file mode 100644 index b367b24024..0000000000 --- a/contrib/lua-torch/torch7/cmake/TorchWrap.cmake +++ /dev/null @@ -1,19 +0,0 @@ -MACRO(ADD_TORCH_WRAP target luafile) - INCLUDE_DIRECTORIES("${CMAKE_CURRENT_BINARY_DIR}") - GET_FILENAME_COMPONENT(_file_ "${luafile}" NAME_WE) - SET(cfile "${_file_}.c") - SET(ENV{LUA_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/../") - IF (DEFINED CWRAP_CUSTOM_LUA) - ADD_CUSTOM_COMMAND( - OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" - COMMAND ${CWRAP_CUSTOM_LUA} ARGS "${CMAKE_CURRENT_SOURCE_DIR}/${luafile}" "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" - DEPENDS "${luafile}") - ELSE (DEFINED CWRAP_CUSTOM_LUA) - ADD_CUSTOM_COMMAND( - OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" - COMMAND luajit ARGS "${CMAKE_CURRENT_SOURCE_DIR}/${luafile}" "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" - DEPENDS "${luafile}") - ENDIF (DEFINED CWRAP_CUSTOM_LUA) -ENDMACRO(ADD_TORCH_WRAP) diff --git a/contrib/lua-torch/torch7/cmake/TorchWrap.cmake.in b/contrib/lua-torch/torch7/cmake/TorchWrap.cmake.in deleted file mode 100644 index 5c20445d1a..0000000000 --- a/contrib/lua-torch/torch7/cmake/TorchWrap.cmake.in +++ /dev/null @@ -1,19 +0,0 @@ -MACRO(ADD_TORCH_WRAP target luafile) - INCLUDE_DIRECTORIES("${CMAKE_CURRENT_BINARY_DIR}") - GET_FILENAME_COMPONENT(_file_ "${luafile}" NAME_WE) - SET(cfile "${_file_}.c") - IF (DEFINED CWRAP_CUSTOM_LUA) - ADD_CUSTOM_COMMAND( - OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" - COMMAND ${CWRAP_CUSTOM_LUA} ARGS "${CMAKE_CURRENT_SOURCE_DIR}/${luafile}" "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" - DEPENDS "${luafile}") - ELSE (DEFINED CWRAP_CUSTOM_LUA) - ADD_CUSTOM_COMMAND( - OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" - COMMAND @LUA@ ARGS "${CMAKE_CURRENT_SOURCE_DIR}/${luafile}" "${CMAKE_CURRENT_BINARY_DIR}/${cfile}" - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" - DEPENDS "${luafile}") - ENDIF (DEFINED CWRAP_CUSTOM_LUA) - ADD_CUSTOM_TARGET(${target} DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/${cfile}") -ENDMACRO(ADD_TORCH_WRAP) diff --git a/contrib/lua-torch/torch7/general.h b/contrib/lua-torch/torch7/general.h deleted file mode 100644 index 
index 3ccf4bdf0e..0000000000
--- a/contrib/lua-torch/torch7/general.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef TORCH_GENERAL_INC
-#define TORCH_GENERAL_INC
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "luaT.h"
-#include "TH.h"
-
-#if (defined(_MSC_VER) || defined(__MINGW32__))
-
-#define snprintf _snprintf
-#define popen _popen
-#define pclose _pclose
-
-#endif
-
-#if LUA_VERSION_NUM >= 503
-/* One can simply enable LUA_COMPAT_5_2 to be backward compatible.
-However, this does not work when we are trying to use a system-installed Lua,
-hence these redefines.
-*/
-#define luaL_optlong(L,n,d) ((long)luaL_optinteger(L, (n), (d)))
-#define luaL_checklong(L,n) ((long)luaL_checkinteger(L, (n)))
-#define luaL_checkint(L,n) ((int)luaL_checkinteger(L, (n)))
-#endif
-
-#endif
diff --git a/contrib/lua-torch/torch7/generic/Storage.c b/contrib/lua-torch/torch7/generic/Storage.c
deleted file mode 100644
index b936e5714c..0000000000
--- a/contrib/lua-torch/torch7/generic/Storage.c
+++ /dev/null
@@ -1,314 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#define TH_GENERIC_FILE "generic/Storage.c"
-#else
-
-#include "luaG.h"
-
-static int torch_Storage_(new)(lua_State *L)
-{
- int index = 1;
- THStorage *storage;
- THAllocator *allocator = luaT_toudata(L, index, "torch.Allocator");
- if (allocator) index++;
-
- if(lua_type(L, index) == LUA_TSTRING)
- {
- if (allocator)
- THError("Passing allocator not supported when using file mapping");
-
- const char *fileName = luaL_checkstring(L, index);
- int isShared = 0;
- if(luaT_optboolean(L, index + 1, 0))
- isShared = TH_ALLOCATOR_MAPPED_SHARED;
- ptrdiff_t size = luaL_optinteger(L, index + 2, 0);
- if (isShared && luaT_optboolean(L, index + 3, 0))
- isShared = TH_ALLOCATOR_MAPPED_SHAREDMEM;
- storage = THStorage_(newWithMapping)(fileName, size, isShared);
- }
- else if(lua_type(L, index) == LUA_TTABLE)
- {
- ptrdiff_t size = lua_objlen(L, index);
- ptrdiff_t i;
- if (allocator)
- storage = THStorage_(newWithAllocator)(size, allocator, NULL);
- else
- storage = THStorage_(newWithSize)(size);
- for(i = 1; i <= size; i++)
- {
- lua_rawgeti(L, index, i);
- if(!lua_isnumber(L, -1))
- {
- THStorage_(free)(storage);
- luaL_error(L, "element at index %d is not a number", i);
- }
- THStorage_(set)(storage, i-1, LUA_NUMBER_TO_REAL(lua_tonumber(L, -1)));
- lua_pop(L, 1);
- }
- }
- else if(lua_type(L, index) == LUA_TUSERDATA)
- {
- if (allocator)
- THError("Passing allocator not supported when using storage views");
-
- THStorage *src = luaT_checkudata(L, index, torch_Storage);
- real *ptr = src->data;
- ptrdiff_t offset = luaL_optinteger(L, index + 1, 1) - 1;
- if (offset < 0 || offset >= src->size) {
- luaL_error(L, "offset out of bounds");
- }
- ptrdiff_t size = luaL_optinteger(L, index + 2, src->size - offset);
- if (size < 1 || size > (src->size - offset)) {
- luaL_error(L, "size out of bounds");
- }
- storage = THStorage_(newWithData)(ptr + offset, size);
- storage->flag = TH_STORAGE_REFCOUNTED | TH_STORAGE_VIEW;
- storage->view = src;
- THStorage_(retain)(storage->view);
- }
- else if(lua_type(L, index + 1) == LUA_TNUMBER)
- {
- ptrdiff_t size = luaL_optinteger(L, index, 0);
- real *ptr = (real *)luaL_optinteger(L, index + 1, 0);
- if (allocator)
- storage = THStorage_(newWithDataAndAllocator)(ptr, size, allocator, NULL);
- else
- storage = THStorage_(newWithData)(ptr, size);
- storage->flag = TH_STORAGE_REFCOUNTED;
- }
- else
- {
- ptrdiff_t size = luaL_optinteger(L, index, 0);
- if (allocator)
- storage = THStorage_(newWithAllocator)(size, allocator, NULL);
- else
- storage =
THStorage_(newWithSize)(size); - } - luaT_pushudata(L, storage, torch_Storage); - return 1; -} - -static int torch_Storage_(retain)(lua_State *L) -{ - THStorage *storage = luaT_checkudata(L, 1, torch_Storage); - THStorage_(retain)(storage); - return 0; -} - -static int torch_Storage_(free)(lua_State *L) -{ - THStorage *storage = luaT_checkudata(L, 1, torch_Storage); - THStorage_(free)(storage); - return 0; -} - -static int torch_Storage_(resize)(lua_State *L) -{ - THStorage *storage = luaT_checkudata(L, 1, torch_Storage); - ptrdiff_t size = luaL_checkinteger(L, 2); -/* int keepContent = luaT_optboolean(L, 3, 0); */ - THStorage_(resize)(storage, size);/*, keepContent); */ - lua_settop(L, 1); - return 1; -} - -static int torch_Storage_(copy)(lua_State *L) -{ - THStorage *storage = luaT_checkudata(L, 1, torch_Storage); - void *src; - if( (src = luaT_toudata(L, 2, torch_Storage)) ) - THStorage_(copy)(storage, src); - else if( (src = luaT_toudata(L, 2, "torch.ByteStorage")) ) - THStorage_(copyByte)(storage, src); - else if( (src = luaT_toudata(L, 2, "torch.CharStorage")) ) - THStorage_(copyChar)(storage, src); - else if( (src = luaT_toudata(L, 2, "torch.ShortStorage")) ) - THStorage_(copyShort)(storage, src); - else if( (src = luaT_toudata(L, 2, "torch.IntStorage")) ) - THStorage_(copyInt)(storage, src); - else if( (src = luaT_toudata(L, 2, "torch.LongStorage")) ) - THStorage_(copyLong)(storage, src); - else if( (src = luaT_toudata(L, 2, "torch.FloatStorage")) ) - THStorage_(copyFloat)(storage, src); - else if( (src = luaT_toudata(L, 2, "torch.DoubleStorage")) ) - THStorage_(copyDouble)(storage, src); - else if( (src = luaT_toudata(L, 2, "torch.HalfStorage")) ) - THStorage_(copyHalf)(storage, src); - else - luaL_typerror(L, 2, "torch.*Storage"); - lua_settop(L, 1); - return 1; -} - -static int torch_Storage_(fill)(lua_State *L) -{ - THStorage *storage = luaT_checkudata(L, 1, torch_Storage); - real value = luaG_(checkreal)(L, 2); - THStorage_(fill)(storage, value); - lua_settop(L, 1); - return 1; -} - -static int torch_Storage_(elementSize)(lua_State *L) -{ - luaT_pushinteger(L, THStorage_(elementSize)()); - return 1; -} - -static int torch_Storage_(__len__)(lua_State *L) -{ - THStorage *storage = luaT_checkudata(L, 1, torch_Storage); - luaT_pushinteger(L, storage->size); - return 1; -} - -static int torch_Storage_(__newindex__)(lua_State *L) -{ - if(lua_isnumber(L, 2)) - { - THStorage *storage = luaT_checkudata(L, 1, torch_Storage); - ptrdiff_t index = luaL_checkinteger(L, 2) - 1; - real number = luaG_(checkreal)(L, 3); - THStorage_(set)(storage, index, number); - lua_pushboolean(L, 1); - } - else - lua_pushboolean(L, 0); - - return 1; -} - -static int torch_Storage_(__index__)(lua_State *L) -{ - if(lua_isnumber(L, 2)) - { - THStorage *storage = luaT_checkudata(L, 1, torch_Storage); - ptrdiff_t index = luaL_checkinteger(L, 2) - 1; - luaG_(pushreal)(L, THStorage_(get)(storage, index)); - lua_pushboolean(L, 1); - return 2; - } - else - { - lua_pushboolean(L, 0); - return 1; - } -} - -#if defined(TH_REAL_IS_CHAR) || defined(TH_REAL_IS_BYTE) -static int torch_Storage_(string)(lua_State *L) -{ - THStorage *storage = luaT_checkudata(L, 1, torch_Storage); - if(lua_isstring(L, -1)) - { - size_t len = 0; - const char *str = lua_tolstring(L, -1, &len); - THStorage_(resize)(storage, len); - memmove(storage->data, str, len); - lua_settop(L, 1); - } - else - lua_pushlstring(L, (char*)storage->data, storage->size); - - return 1; /* either storage or string */ -} -#endif - -static int 
torch_Storage_(text)(lua_State *L)
-{
- THStorage *storage = luaT_checkudata(L, 1, torch_Storage);
- struct _rspamd_lua_text *t;
-
- if(lua_type(L, -1) == LUA_TUSERDATA)
- {
- t = lua_touserdata(L, -1);
- THStorage_(resize)(storage, t->len);
- memmove(storage->data, t->start, t->len);
- lua_settop(L, 1);
- }
- else {
- t = lua_newuserdata (L, sizeof (*t));
- t->start = (const char *)storage->data;
- t->len = storage->size;
- t->flags = 0;
- }
-
- return 1; /* either storage or text */
-}
-
-static int torch_Storage_(totable)(lua_State *L)
-{
- THStorage *storage = luaT_checkudata(L, 1, torch_Storage);
- ptrdiff_t i;
-
- lua_newtable(L);
- for(i = 0; i < storage->size; i++)
- {
- luaG_(pushreal)(L, storage->data[i]);
- lua_rawseti(L, -2, i+1);
- }
- return 1;
-}
-
-static int torch_Storage_(factory)(lua_State *L)
-{
- THStorage *storage = THStorage_(new)();
- luaT_pushudata(L, storage, torch_Storage);
- return 1;
-}
-
-static int torch_Storage_(write)(lua_State *L)
-{
- THStorage *storage = luaT_checkudata(L, 1, torch_Storage);
- THFile *file = luaT_checkudata(L, 2, "torch.File");
-
-#ifdef DEBUG
- THAssert(storage->size < LONG_MAX);
-#endif
- THFile_writeLongScalar(file, storage->size);
- THFile_writeRealRaw(file, storage->data, storage->size);
-
- return 0;
-}
-
-static int torch_Storage_(read)(lua_State *L)
-{
- THStorage *storage = luaT_checkudata(L, 1, torch_Storage);
- THFile *file = luaT_checkudata(L, 2, "torch.File");
- ptrdiff_t size = THFile_readLongScalar(file);
-
- THStorage_(resize)(storage, size);
- THFile_readRealRaw(file, storage->data, storage->size);
-
- return 0;
-}
-
-static const struct luaL_Reg torch_Storage_(_) [] = {
- {"retain", torch_Storage_(retain)},
- {"free", torch_Storage_(free)},
- {"size", torch_Storage_(__len__)},
- {"elementSize", torch_Storage_(elementSize)},
- {"__len__", torch_Storage_(__len__)},
- {"__newindex__", torch_Storage_(__newindex__)},
- {"__index__", torch_Storage_(__index__)},
- {"resize", torch_Storage_(resize)},
- {"fill", torch_Storage_(fill)},
- {"copy", torch_Storage_(copy)},
- {"totable", torch_Storage_(totable)},
- {"write", torch_Storage_(write)},
- {"read", torch_Storage_(read)},
- {"text", torch_Storage_(text)},
-#if defined(TH_REAL_IS_CHAR) || defined(TH_REAL_IS_BYTE)
- {"string", torch_Storage_(string)},
-#endif
- {NULL, NULL}
-};
-
-void torch_Storage_(init)(lua_State *L)
-{
- luaT_newmetatable(L, torch_Storage, NULL,
- torch_Storage_(new), torch_Storage_(free), torch_Storage_(factory));
- luaT_setfuncs(L, torch_Storage_(_), 0);
- lua_pop(L, 1);
-}
-
-#endif
diff --git a/contrib/lua-torch/torch7/generic/Tensor.c b/contrib/lua-torch/torch7/generic/Tensor.c
deleted file mode 100644
index 112a4bd63d..0000000000
--- a/contrib/lua-torch/torch7/generic/Tensor.c
+++ /dev/null
@@ -1,1360 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#define TH_GENERIC_FILE "generic/Tensor.c"
-#else
-
-#include "luaG.h"
-
-static void torch_Tensor_(c_readTensorStorageSizeStride)(lua_State *L, int index, int allowNone, int allowTensor, int allowStorage, int allowStride,
- THStorage **storage_, ptrdiff_t *storageOffset_, THLongStorage **size_, THLongStorage **stride_);
-
-static void torch_Tensor_(c_readSizeStride)(lua_State *L, int index, int allowStride, THLongStorage **size_, THLongStorage **stride_);
-
-static int torch_Tensor_(size)(lua_State *L)
-{
- THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor);
- if(lua_isnumber(L,2))
- {
- int dim = luaL_checkint(L, 2)-1;
- THArgCheck(dim >= 0 && dim < tensor->nDimension, 2, "dimension %d out of range of %dD tensor",
-
dim+1, THTensor_(nDimension)(tensor)); - luaT_pushlong(L, tensor->size[dim]); - } - else - { - THLongStorage *size = THTensor_(newSizeOf)(tensor); - luaT_pushudata(L, size, "torch.LongStorage"); - } - return 1; -} - -static int torch_Tensor_(elementSize)(lua_State *L) -{ - luaT_pushinteger(L, THStorage_(elementSize)()); - return 1; -} - -static int torch_Tensor_(stride)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - if(lua_isnumber(L,2)) - { - int dim = luaL_checkint(L, 2)-1; - THArgCheck(dim >= 0 && dim < tensor->nDimension, 2, "dimension %d out of range of %dD tensor", - dim+1, THTensor_(nDimension)(tensor)); - luaT_pushlong(L, tensor->stride[dim]); - } - else - { - THLongStorage *storage = THLongStorage_newWithSize(tensor->nDimension); - memmove(storage->data, tensor->stride, sizeof(long)*tensor->nDimension); - luaT_pushudata(L, storage, "torch.LongStorage"); - } - return 1; -} - -static int torch_Tensor_(nDimension)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - luaT_pushinteger(L, tensor->nDimension); - return 1; -} - -static int torch_Tensor_(storage)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - if(tensor->storage) - { - THStorage_(retain)(tensor->storage); - luaT_pushudata(L, tensor->storage, torch_Storage); - } - else - lua_pushnil(L); - - return 1; -} - -static int torch_Tensor_(storageOffset)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - luaT_pushinteger(L, tensor->storageOffset+1); - return 1; -} - -static int torch_Tensor_(new)(lua_State *L) -{ - THTensor *tensor; - ptrdiff_t storageOffset; - THLongStorage *size, *stride; - - if(lua_type(L, 1) == LUA_TTABLE) - { - ptrdiff_t i, j; - THLongStorage *counter; - ptrdiff_t si = 0; - int dimension = 0; - int is_finished = 0; - - lua_settop(L, 1); - size = THLongStorage_new(); - - while( (lua_type(L, -1) == LUA_TTABLE) && (lua_objlen(L, -1) > 0) ) - { - THLongStorage_resize(size, dimension+1); - size->data[dimension] = lua_objlen(L, -1); - dimension++; - lua_rawgeti(L, -1, 1); - } - lua_pop(L, 1); - - counter = THLongStorage_newWithSize(size->size); - THLongStorage_fill(counter, 0); - - tensor = THTensor_(newWithSize)(size, NULL); - - if(size->size == 0) - is_finished = 1; - - while(!is_finished) - { - if(!lua_istable(L, -1)) - { - THLongStorage_free(size); - THLongStorage_free(counter); - THTensor_(free)(tensor); - THError("invalid tensor definition"); - } - - if(lua_objlen(L, -1) != size->data[size->size-1]) - { - THLongStorage_free(size); - THLongStorage_free(counter); - THTensor_(free)(tensor); - THError("invalid tensor sizes"); - } - - for(i = 0; i < size->data[size->size-1]; i++) - { - lua_rawgeti(L, -1, i+1); - if(!lua_isnumber(L, -1)) - { - THLongStorage_free(size); - THLongStorage_free(counter); - THTensor_(free)(tensor); - THError("invalid element (not a number)"); - } - THStorage_(set)(THTensor_(storage)(tensor), si++, luaG_(checkreal)(L, -1)); - lua_pop(L, 1); - } - - if(size->size == 1) - break; - - for(i = size->size-2; i >= 0; i--) - { - if(++counter->data[i] == size->data[i]) - { - if(i == 0) - { - is_finished = 1; - break; - } - else - { - counter->data[i] = 0; - lua_pop(L, 1); - } - } - else - { - lua_pop(L, 1); - for(j = i; j < size->size-1; j++) - { - if(!lua_istable(L, -1)) - { - THLongStorage_free(size); - THLongStorage_free(counter); - THTensor_(free)(tensor); - THError("invalid tensor definition"); - } - if(lua_objlen(L, -1) != size->data[j]) - { - THLongStorage_free(size); - 
THLongStorage_free(counter); - THTensor_(free)(tensor); - THError("invalid tensor sizes"); - } - lua_rawgeti(L, -1, counter->data[j]+1); - } - break; - } - } - } - - THLongStorage_free(size); - THLongStorage_free(counter); - } - else - { - THStorage *storage; - - torch_Tensor_(c_readTensorStorageSizeStride)(L, 1, 1, 1, 1, 1, - &storage, &storageOffset, &size, &stride); - - tensor = THTensor_(newWithStorage)(storage, storageOffset, size, stride); - - THLongStorage_free(size); - THLongStorage_free(stride); - } - - luaT_pushudata(L, tensor, torch_Tensor); - return 1; -} - -static int torch_Tensor_(set)(lua_State *L) -{ - THTensor *self = luaT_checkudata(L, 1, torch_Tensor); - THStorage *storage; - ptrdiff_t storageOffset; - THLongStorage *size, *stride; - - torch_Tensor_(c_readTensorStorageSizeStride)(L, 2, 1, 1, 1, 1, - &storage, &storageOffset, &size, &stride); - - THTensor_(setStorage)(self, storage, storageOffset, size, stride); - - THLongStorage_free(size); - THLongStorage_free(stride); - - lua_settop(L, 1); - return 1; -} - -static int torch_Tensor_(clone)(lua_State *L) -{ - THTensor *self = luaT_checkudata(L, 1, torch_Tensor); - self = THTensor_(newClone)(self); - luaT_pushudata(L, self, torch_Tensor); - return 1; -} - -static int torch_Tensor_(contiguous)(lua_State *L) -{ - THTensor *self = luaT_checkudata(L, 1, torch_Tensor); - self = THTensor_(newContiguous)(self); - luaT_pushudata(L, self, torch_Tensor); - return 1; -} - -/* Resize */ -static int torch_Tensor_(resizeAs)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THTensor *src = luaT_checkudata(L, 2, torch_Tensor); - THTensor_(resizeAs)(tensor, src); - lua_settop(L, 1); - return 1; -} - -static int torch_Tensor_(resize)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THLongStorage *size, *stride; - - torch_Tensor_(c_readSizeStride)(L, 2, 0, &size, &stride); - - THTensor_(resize)(tensor, size, stride); - - THLongStorage_free(size); - THLongStorage_free(stride); - - lua_settop(L, 1); - return 1; -} - -static int torch_Tensor_(narrow)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - int dimension = luaL_checkint(L, 2)-1; - long firstIndex = luaL_checklong(L, 3)-1; - long size = luaL_checklong(L, 4); - -/* THArgCheck( (dimension >= 0) && (dimension < tensor->nDimension), 2, "out of range"); - THArgCheck( (firstIndex >= 0) && (firstIndex < tensor->size[dimension]), 3, "out of range"); - THArgCheck( (size > 0) && (firstIndex+size <= tensor->size[dimension]), 4, "out of range"); -*/ - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, dimension, firstIndex, size); - luaT_pushudata(L, tensor, torch_Tensor); - return 1; -} - -static int torch_Tensor_(sub)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - long d0s = -1, d0e = -1, d1s = -1, d1e = -1, d2s = -1, d2e = -1, d3s = -1, d3e = -1; - - d0s = luaL_checklong(L, 2)-1; - d0e = luaL_checklong(L, 3)-1; - if(d0s < 0) - d0s += tensor->size[0]+1; - if(d0e < 0) - d0e += tensor->size[0]+1; - THArgCheck(tensor->nDimension > 0, 2, "invalid dimension"); - THArgCheck(d0s >= 0 && d0s < tensor->size[0], 2, "out of range"); - THArgCheck(d0e >= 0 && d0e < tensor->size[0], 3, "out of range"); - THArgCheck(d0e >= d0s, 3, "end smaller than beginning"); - - if(!lua_isnone(L, 4)) - { - d1s = luaL_checklong(L, 4)-1; - d1e = luaL_checklong(L, 5)-1; - if(d1s < 0) - d1s += tensor->size[1]+1; - if(d1e < 0) - d1e += tensor->size[1]+1; - THArgCheck(tensor->nDimension > 1, 4, 
"invalid dimension"); - THArgCheck(d1s >= 0 && d1s < tensor->size[1], 4, "out of range"); - THArgCheck(d1e >= 0 && d1e < tensor->size[1], 5, "out of range"); - THArgCheck(d1e >= d1s, 5, "end smaller than beginning"); - - if(!lua_isnone(L, 6)) - { - d2s = luaL_checklong(L, 6)-1; - d2e = luaL_checklong(L, 7)-1; - if(d2s < 0) - d2s += tensor->size[2]+1; - if(d2e < 0) - d2e += tensor->size[2]+1; - THArgCheck(tensor->nDimension > 2, 6, "invalid dimension"); - THArgCheck(d2s >= 0 && d2s < tensor->size[2], 6, "out of range"); - THArgCheck(d2e >= 0 && d2e < tensor->size[2], 7, "out of range"); - THArgCheck(d2e >= d2s, 7, "end smaller than beginning"); - - if(!lua_isnone(L, 8)) - { - d3s = luaL_checklong(L, 8)-1; - d3e = luaL_checklong(L, 9)-1; - if(d3s < 0) - d3s += tensor->size[3]+1; - if(d3e < 0) - d3e += tensor->size[3]+1; - THArgCheck(tensor->nDimension > 3, 8, "invalid dimension"); - THArgCheck(d3s >= 0 && d3s < tensor->size[3], 8, "out of range"); - THArgCheck(d3e >= 0 && d3e < tensor->size[3], 9, "out of range"); - THArgCheck(d3e >= d3s, 9, "end smaller than beginning"); - } - } - } - - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, 0, d0s, d0e-d0s+1); - if(d1s >= 0) - THTensor_(narrow)(tensor, NULL, 1, d1s, d1e-d1s+1); - if(d2s >= 0) - THTensor_(narrow)(tensor, NULL, 2, d2s, d2e-d2s+1); - if(d3s >= 0) - THTensor_(narrow)(tensor, NULL, 3, d3s, d3e-d3s+1); - luaT_pushudata(L, tensor, torch_Tensor); - return 1; -} - -static int torch_Tensor_(select)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - int dimension = luaL_checkint(L, 2)-1; - long sliceIndex = luaL_checklong(L, 3)-1; - -/* THArgCheck(src->nDimension > 1, 1, "cannot select on a vector"); - THArgCheck((dimension >= 0) && (dimension < src->nDimension), 2, "out of range"); - THArgCheck((sliceIndex >= 0) && (sliceIndex < src->size[dimension]), 3, "out of range"); -*/ - - if(tensor->nDimension > 1) - { - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(select)(tensor, NULL, dimension, sliceIndex); - luaT_pushudata(L, tensor, torch_Tensor); - } - else - { - THArgCheck(tensor->nDimension == 1, 1, "empty Tensor"); - luaG_(pushreal)(L, THTensor_(get1d)(tensor, sliceIndex)); - } - - return 1; -} - -#ifndef TH_REAL_IS_HALF -static int torch_Tensor_(indexSelect)(lua_State *L) -{ - int narg = lua_gettop(L); - THTensor *tensor, *src; - THLongTensor *index; - int dim; - if (narg == 3) - { - tensor = THTensor_(new)(); - src = luaT_checkudata(L, 1, torch_Tensor); - dim = luaL_checkint(L, 2) - 1; - index = luaT_checkudata(L, 3, "torch.LongTensor"); - luaT_pushudata(L,tensor,torch_Tensor); - } - else if(narg == 4) - { - src = luaT_checkudata(L, 2, torch_Tensor); - dim = luaL_checkint(L, 3) - 1; - index = luaT_checkudata(L, 4, "torch.LongTensor"); - tensor = luaT_checkudata(L,1,torch_Tensor); - } - else - { - THError(torch_Tensor ", number, torch.LongTensor | " torch_Tensor ", " torch_Tensor ", number, torch.LongTensor expected"); - return 0; - } - - THTensor_(indexSelect)(tensor,src,dim,index); - - return 1; -} - -static int torch_Tensor_(indexCopy)(lua_State *L) -{ - int narg = lua_gettop(L); - THTensor *tensor, *src; - THLongTensor *index; - int dim; - if(narg == 4) - { - dim = luaL_checkint(L, 2) - 1; - index = luaT_checkudata(L, 3, "torch.LongTensor"); - src = luaT_checkudata(L, 4, torch_Tensor); - tensor = luaT_checkudata(L,1,torch_Tensor); - } - else - { - THError( torch_Tensor ", number, torch.LongTensor, " torch_Tensor " expected"); - return 0; - } - - 
THTensor_(indexCopy)(tensor,dim,index,src); - - return 1; -} - -static int torch_Tensor_(indexAdd)(lua_State *L) -{ - int narg = lua_gettop(L); - THTensor *tensor, *src; - THLongTensor *index; - int dim; - if(narg == 4) - { - dim = luaL_checkint(L, 2) - 1; - index = luaT_checkudata(L, 3, "torch.LongTensor"); - src = luaT_checkudata(L, 4, torch_Tensor); - tensor = luaT_checkudata(L,1,torch_Tensor); - } - else - { - THError( torch_Tensor ", number, torch.LongTensor, " torch_Tensor " expected"); - return 0; - } - - THTensor_(indexAdd)(tensor,dim,index,src); - - return 1; -} - -static int torch_Tensor_(indexFill)(lua_State *L) -{ - int narg = lua_gettop(L); - THTensor *tensor; - THLongTensor *index; - real val; - int dim; - if(narg == 4) - { - dim = luaL_checkint(L, 2) - 1; - index = luaT_checkudata(L, 3, "torch.LongTensor"); - val = luaG_(checkreal)(L, 4); - tensor = luaT_checkudata(L,1,torch_Tensor); - } - else - { - THError( torch_Tensor ", number, torch.LongTensor, number expected"); - return 0; - } - - THTensor_(indexFill)(tensor,dim,index,val); - - return 1; -} - -static int torch_Tensor_(maskedSelect)(lua_State *L) -{ - int narg = lua_gettop(L); - THTensor *tensor, *src; - THByteTensor *mask; - - if (narg == 2) - { - tensor = THTensor_(new)(); - src = luaT_checkudata(L, 1, torch_Tensor); - mask = luaT_checkudata(L, 2, "torch.ByteTensor"); - luaT_pushudata(L,tensor,torch_Tensor); - } - else if(narg == 3) - { - src = luaT_checkudata(L, 2, torch_Tensor); - mask = luaT_checkudata(L, 3, "torch.ByteTensor"); - tensor = luaT_checkudata(L,1,torch_Tensor); - } - else - { - THError( torch_Tensor ", torch.ByteTensor | " torch_Tensor ", " torch_Tensor ", torch.ByteTensor expected"); - return 0; - } - - THTensor_(maskedSelect)(tensor,src,mask); - - return 1; -} - -static int torch_Tensor_(maskedCopy)(lua_State *L) -{ - int narg = lua_gettop(L); - THTensor *tensor, *src; - THByteTensor *mask; - - if(narg == 3) - { - mask = luaT_checkudata(L, 2, "torch.ByteTensor"); - src = luaT_checkudata(L, 3, torch_Tensor); - tensor = luaT_checkudata(L,1,torch_Tensor); - } - else - { - THError( torch_Tensor ", torch.ByteTensor, " torch_Tensor " expected"); - return 0; - } - - THTensor_(maskedCopy)(tensor,mask,src); - - /* return destination */ - lua_pop(L, 2); - - return 1; -} - -static int torch_Tensor_(maskedFill)(lua_State *L) -{ - int narg = lua_gettop(L); - THTensor *tensor; - THByteTensor *mask; - real val; - if(narg == 3) - { - mask = luaT_checkudata(L, 2, "torch.ByteTensor"); - val = luaG_(checkreal)(L, 3); - tensor = luaT_checkudata(L,1,torch_Tensor); - } - else - { - THError( torch_Tensor ", torch.ByteTensor, number expected"); - return 0; - } - - THTensor_(maskedFill)(tensor,mask,val); - - return 1; -} -#endif - -static int torch_Tensor_(transpose)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - int dimension1 = luaL_checkint(L, 2)-1; - int dimension2 = luaL_checkint(L, 3)-1; - -/* - THArgCheck( (dimension1 >= 0) && (dimension1 < src->nDimension), 2, "out of range"); - THArgCheck( (dimension2 >= 0) && (dimension2 < src->nDimension), 3, "out of range"); -*/ - - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(transpose)(tensor, NULL, dimension1, dimension2); - luaT_pushudata(L, tensor, torch_Tensor); - return 1; -} - -static int torch_Tensor_(t)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - - THArgCheck(tensor->nDimension == 2, 1, "Tensor must have 2 dimensions"); - - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(transpose)(tensor, 
NULL, 0, 1); - luaT_pushudata(L, tensor, torch_Tensor); - return 1; -} - -static int torch_Tensor_(unfold)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - int dimension = luaL_checkint(L, 2)-1; - long size = luaL_checklong(L, 3); - long step = luaL_checklong(L, 4); - -/* - THArgCheck( (src->nDimension > 0), 1, "cannot unfold an empty tensor"); - THArgCheck(dimension < src->nDimension, 2, "out of range"); - THArgCheck(size <= src->size[dimension], 3, "out of range"); -*/ - - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(unfold)(tensor, NULL, dimension, size, step); - luaT_pushudata(L, tensor, torch_Tensor); - return 1; -} - -/* is contiguous? [a bit like in TnXIterator] */ -static int torch_Tensor_(isContiguous)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - lua_pushboolean(L, THTensor_(isContiguous)(tensor)); - return 1; -} - -static int torch_Tensor_(isSize)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THLongStorage *size = luaT_checkudata(L, 2, "torch.LongStorage"); - lua_pushboolean(L, THTensor_(isSize)(tensor, size)); - return 1; -} - -static int torch_Tensor_(isSameSizeAs)(lua_State *L) -{ - THTensor *tensor1 = luaT_checkudata(L, 1, torch_Tensor); - THTensor *tensor2 = luaT_checkudata(L, 2, torch_Tensor); - lua_pushboolean(L, THTensor_(isSameSizeAs)(tensor1, tensor2)); - return 1; -} - -static int torch_Tensor_(isSetTo)(lua_State *L) -{ - THTensor *tensor1 = luaT_checkudata(L, 1, torch_Tensor); - THTensor *tensor2 = luaT_checkudata(L, 2, torch_Tensor); - lua_pushboolean(L, THTensor_(isSetTo)(tensor1, tensor2)); - return 1; -} - -static int torch_Tensor_(nElement)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - luaT_pushinteger(L, THTensor_(nElement)(tensor)); - return 1; -} - -static int torch_Tensor_(copy)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - void *src; - if( (src = luaT_toudata(L, 2, torch_Tensor)) ) - THTensor_(copy)(tensor, src); - else if( (src = luaT_toudata(L, 2, "torch.ByteTensor")) ) - THTensor_(copyByte)(tensor, src); - else if( (src = luaT_toudata(L, 2, "torch.CharTensor")) ) - THTensor_(copyChar)(tensor, src); - else if( (src = luaT_toudata(L, 2, "torch.ShortTensor")) ) - THTensor_(copyShort)(tensor, src); - else if( (src = luaT_toudata(L, 2, "torch.IntTensor")) ) - THTensor_(copyInt)(tensor, src); - else if( (src = luaT_toudata(L, 2, "torch.LongTensor")) ) - THTensor_(copyLong)(tensor, src); - else if( (src = luaT_toudata(L, 2, "torch.FloatTensor")) ) - THTensor_(copyFloat)(tensor, src); - else if( (src = luaT_toudata(L, 2, "torch.DoubleTensor")) ) - THTensor_(copyDouble)(tensor, src); - else if( (src = luaT_toudata(L, 2, "torch.HalfTensor")) ) - THTensor_(copyHalf)(tensor, src); - else - luaL_typerror(L, 2, "torch.*Tensor"); - lua_settop(L, 1); - return 1; -} - -static int torch_Tensor_(__newindex__)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THLongStorage *idx = NULL; - THByteTensor *mask; - - if(lua_isnumber(L, 2)) - { - void *src; - long index = luaL_checklong(L,2)-1; - THArgCheck(tensor->nDimension > 0, 1, "empty tensor"); - if (index < 0) index = tensor->size[0] + index + 1; - - if (lua_isnumber(L,3)) { - real value = luaG_(checkreal)(L,3); - if (tensor->nDimension == 1) { - THArgCheck(index >= 0 && index < tensor->size[0], 2, "out of range"); - THStorage_(set)(tensor->storage, tensor->storageOffset+index*tensor->stride[0], value); - } else { -#ifndef TH_REAL_IS_HALF 
- tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, 0, index, 1); - THTensor_(fill)(tensor, value); - THTensor_(free)(tensor); -#else - THError("fill on torch.HalfTensor not yet supported"); -#endif - } - } else if( (src = luaT_toudata(L, 3, torch_Tensor)) ) { - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, 0, index, 1); - THTensor_(copy)(tensor, src); - THTensor_(free)(tensor); - } else if( (src = luaT_toudata(L, 3, "torch.ByteTensor")) ) { - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, 0, index, 1); - THTensor_(copyByte)(tensor, src); - THTensor_(free)(tensor); - } else if( (src = luaT_toudata(L, 3, "torch.CharTensor")) ) { - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, 0, index, 1); - THTensor_(copyChar)(tensor, src); - THTensor_(free)(tensor); - } else if( (src = luaT_toudata(L, 3, "torch.ShortTensor")) ) { - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, 0, index, 1); - THTensor_(copyShort)(tensor, src); - THTensor_(free)(tensor); - } else if( (src = luaT_toudata(L, 3, "torch.IntTensor")) ) { - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, 0, index, 1); - THTensor_(copyInt)(tensor, src); - THTensor_(free)(tensor); - } else if( (src = luaT_toudata(L, 3, "torch.LongTensor")) ) { - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, 0, index, 1); - THTensor_(copyLong)(tensor, src); - THTensor_(free)(tensor); - } else if( (src = luaT_toudata(L, 3, "torch.FloatTensor")) ) { - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, 0, index, 1); - THTensor_(copyFloat)(tensor, src); - THTensor_(free)(tensor); - } else if( (src = luaT_toudata(L, 3, "torch.DoubleTensor")) ) { - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, 0, index, 1); - THTensor_(copyDouble)(tensor, src); - THTensor_(free)(tensor); - } else if( (src = luaT_toudata(L, 3, "torch.HalfTensor")) ) { - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(tensor, NULL, 0, index, 1); - THTensor_(copyHalf)(tensor, src); - THTensor_(free)(tensor); - } else { - luaL_typerror(L, 3, "torch.*Tensor"); - } - lua_pushboolean(L, 1); - } - else if((idx = luaT_toudata(L, 2, "torch.LongStorage"))) - { - ptrdiff_t index = THTensor_(storageOffset)(tensor); - real value = luaG_(checkreal)(L,3); - int dim; - - THArgCheck(idx->size == tensor->nDimension, 2, "invalid size"); - - for(dim = 0; dim < idx->size; dim++) - { - long z = idx->data[dim]-1; - if (z < 0) z = tensor->size[dim] + z + 1; - THArgCheck((z >= 0) && (z < tensor->size[dim]), 2, "index out of bound"); - index += z*tensor->stride[dim]; - } - - THStorage_(set)(tensor->storage, index, value); - lua_pushboolean(L, 1); - } - else if(lua_istable(L, 2)) - { - int dim; - int cdim = 0; - int ndims; - int done = 0; - ndims = tensor->nDimension; - THArgCheck(lua_objlen(L, 2) <= ndims, 2, "too many indices provided"); - tensor = THTensor_(newWithTensor)(tensor); - for(dim = 0; dim < ndims; dim++) - { - lua_rawgeti(L, 2, dim+1); - if(lua_isnumber(L, -1)) - { - long z = lua_tonumber(L, -1)-1; - lua_pop(L, 1); - if (z < 0) z = tensor->size[cdim] + z + 1; - THArgCheck((z >= 0) && (z < tensor->size[cdim]), 2, "index out of bound"); - if(tensor->nDimension == 1) { - real value = luaG_(checkreal)(L,3); - done = 1; - THStorage_(set)(tensor->storage, tensor->storageOffset+z*tensor->stride[0], value); - } else { - THTensor_(select)(tensor, 
NULL, cdim, z); - } - } - else if (lua_istable(L, -1)) - { - long start = 0; - long end = tensor->size[cdim]-1; - lua_rawgeti(L, -1, 1); - if(lua_isnumber(L, -1)) { - start = lua_tonumber(L, -1)-1; - end = start; - } - lua_pop(L, 1); - if (start < 0) start = tensor->size[cdim] + start + 1; - THArgCheck((start >= 0) && (start < tensor->size[cdim]), 2, "start index out of bound"); - - lua_rawgeti(L, -1, 2); - if(lua_isnumber(L, -1)) { - end = lua_tonumber(L, -1)-1; - } - lua_pop(L, 2); - if (end < 0) end = tensor->size[cdim] + end + 1; - THArgCheck((end >= 0) && (end < tensor->size[cdim]), 2, "end index out of bound"); - - THArgCheck((end >= start), 2, "end index must be greater or equal to start index"); - - THTensor_(narrow)(tensor, NULL, cdim++, start, end-start+1); - } - else - { - break; - } - } - if(!done) { - /* doing a copy */ - void *src; - if (lua_isnumber(L,3)) { -#ifndef TH_REAL_IS_HALF - THTensor_(fill)(tensor, LUA_NUMBER_TO_REAL(lua_tonumber(L,3))); -#else - THError("fill on torch.HalfTensor not yet supported"); -#endif - } else if( (src = luaT_toudata(L, 3, torch_Tensor)) ) { - THTensor_(copy)(tensor, src); - } else if( (src = luaT_toudata(L, 3, "torch.ByteTensor")) ) { - THTensor_(copyByte)(tensor, src); - } else if( (src = luaT_toudata(L, 3, "torch.CharTensor")) ) { - THTensor_(copyChar)(tensor, src); - } else if( (src = luaT_toudata(L, 3, "torch.ShortTensor")) ) { - THTensor_(copyShort)(tensor, src); - } else if( (src = luaT_toudata(L, 3, "torch.IntTensor")) ) { - THTensor_(copyInt)(tensor, src); - } else if( (src = luaT_toudata(L, 3, "torch.LongTensor")) ) { - THTensor_(copyLong)(tensor, src); - } else if( (src = luaT_toudata(L, 3, "torch.FloatTensor")) ) { - THTensor_(copyFloat)(tensor, src); - } else if( (src = luaT_toudata(L, 3, "torch.DoubleTensor")) ) { - THTensor_(copyDouble)(tensor, src); - } else if( (src = luaT_toudata(L, 3, "torch.HalfTensor")) ) { - THTensor_(copyHalf)(tensor, src); - } else { - luaL_typerror(L, 3, "torch.*Tensor"); - } - } - THTensor_(free)(tensor); - lua_pushboolean(L, 1); - } - else if((mask = luaT_toudata(L, 2, "torch.ByteTensor"))) - { -#ifndef TH_REAL_IS_HALF - THTensor *vals; - if (lua_isnumber(L, 3)) - { - THTensor_(maskedFill)(tensor, mask, luaG_(checkreal)(L,3)); - } - else if((vals = luaT_toudata(L, 3, torch_Tensor))) - { - THTensor_(maskedCopy)(tensor, mask, vals); - } - else - { - THError("number or " torch_Tensor " expected"); - } -#else - THError("ByteTensor indexing not yet supported with half types"); -#endif - } - else - lua_pushboolean(L, 0); - - return 1; -} - -static int torch_Tensor_(__index__)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THLongStorage *idx = NULL; - THByteTensor *mask; - - if(lua_isnumber(L, 2)) - { - long index = luaL_checklong(L,2)-1; - - THArgCheck(tensor->nDimension > 0, 1, "empty tensor"); - if (index < 0) index = tensor->size[0] + index + 1; - THArgCheck(index >= 0 && index < tensor->size[0], 2, "out of range"); - - if(tensor->nDimension == 1) - { - luaG_(pushreal)(L, THStorage_(get)(tensor->storage, tensor->storageOffset+index*tensor->stride[0])); - } - else - { - tensor = THTensor_(newWithTensor)(tensor); - THTensor_(select)(tensor, NULL, 0, index); - luaT_pushudata(L, tensor, torch_Tensor); - } - lua_pushboolean(L, 1); - return 2; - } - else if((idx = luaT_toudata(L, 2, "torch.LongStorage"))) - { - ptrdiff_t index = THTensor_(storageOffset)(tensor); - int dim; - - THArgCheck(idx->size == tensor->nDimension, 2, "invalid size"); - - for(dim = 0; dim < idx->size; 
dim++)
-    {
-      long z = idx->data[dim]-1;
-      if (z < 0) z = tensor->size[dim] + z + 1;
-      THArgCheck((z >= 0) && (z < tensor->size[dim]), 2, "index out of bound");
-      index += z*tensor->stride[dim];
-    }
-    luaG_(pushreal)(L, THStorage_(get)(THTensor_(storage)(tensor), index));
-    lua_pushboolean(L, 1);
-    return 2;
-  }
-  else if(lua_istable(L, 2))
-  {
-    int dim;
-    int cdim = 0;
-    int ndims;
-    int done = 0;
-
-    ndims = tensor->nDimension;
-    THArgCheck(lua_objlen(L, 2) <= ndims, 2, "too many indices provided");
-    tensor = THTensor_(newWithTensor)(tensor);
-
-    for(dim = 0; dim < ndims; dim++)
-    {
-      lua_rawgeti(L, 2, dim+1);
-      if(lua_isnumber(L, -1))
-      {
-        long z = lua_tonumber(L, -1)-1;
-        lua_pop(L, 1);
-        if (z < 0) z = tensor->size[cdim] + z + 1;
-        THArgCheck((z >= 0) && (z < tensor->size[cdim]), 2, "index out of bound");
-        if(tensor->nDimension == 1) {
-          done = 1;
-          luaG_(pushreal)(L, THStorage_(get)(tensor->storage, tensor->storageOffset+z*tensor->stride[0]));
-        } else {
-          THTensor_(select)(tensor, NULL, cdim, z);
-        }
-      }
-      else if (lua_istable(L, -1))
-      {
-        long start = 0;
-        long end = tensor->size[cdim]-1;
-        lua_rawgeti(L, -1, 1);
-        if(lua_isnumber(L, -1)) {
-          start = lua_tonumber(L, -1)-1;
-          end = start;
-        }
-        lua_pop(L, 1);
-        if (start < 0) start = tensor->size[cdim] + start + 1;
-        THArgCheck((start >= 0) && (start < tensor->size[cdim]), 2, "start index out of bound");
-
-        lua_rawgeti(L, -1, 2);
-        if(lua_isnumber(L, -1)) {
-          end = lua_tonumber(L, -1)-1;
-        }
-        lua_pop(L, 2);
-        if (end < 0) end = tensor->size[cdim] + end + 1;
-        THArgCheck((end >= 0) && (end < tensor->size[cdim]), 2, "end index out of bound");
-
-        THArgCheck((end >= start), 2, "end index must be greater or equal to start index");
-
-        THTensor_(narrow)(tensor, NULL, cdim++, start, end-start+1);
-      }
-      else
-      {
-        break;
-      }
-    }
-    if(!done) {
-      luaT_pushudata(L, tensor, torch_Tensor);
-    } else {
-      THTensor_(free)(tensor);
-    }
-    lua_pushboolean(L, 1);
-    return 2;
-  }
-  else if((mask = luaT_toudata(L, 2, "torch.ByteTensor")))
-  {
-#ifndef TH_REAL_IS_HALF
-    THTensor *vals = THTensor_(new)();
-    THTensor_(maskedSelect)(vals, tensor, mask);
-    luaT_pushudata(L, vals, torch_Tensor);
-    lua_pushboolean(L, 1);
-    return 2;
-#else
-    THError("ByteTensor based indexing not yet supported with half type");
-    return 0;
-#endif
-  }
-  else
-  {
-    lua_pushboolean(L, 0);
-    return 1;
-  }
-}
-
-static int torch_Tensor_(retain)(lua_State *L)
-{
-  THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor);
-  THTensor_(retain)(tensor);
-  return 0;
-}
-
-static int torch_Tensor_(free)(lua_State *L)
-{
-  THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor);
-  THTensor_(free)(tensor);
-  return 0;
-}
-
-/* helpful functions */
-static void torch_Tensor_(c_readSizeStride)(lua_State *L, int index, int allowStride, THLongStorage **size_, THLongStorage **stride_)
-{
-  THLongStorage *size = NULL;
-  THLongStorage *stride = NULL;
-
-  if( (size = luaT_toudata(L, index, "torch.LongStorage")) )
-  {
-    if(!lua_isnoneornil(L, index+1))
-    {
-      if( (stride = luaT_toudata(L, index+1, "torch.LongStorage")) )
-        THArgCheck(stride->size == size->size, index+1, "provided stride and size are inconsistent");
-      else
-        THArgCheck(0, index+1, "torch.LongStorage expected");
-    }
-    THLongStorage_retain(size);
-    if(stride)
-      THLongStorage_retain(stride);
-  }
-  else
-  {
-    int i;
-
-    size = THLongStorage_newWithSize(8);
-    stride = THLongStorage_newWithSize(8);
-    THLongStorage_fill(size, -1);
-    THLongStorage_fill(stride, -1);
-
-    if(allowStride)
-    {
-      for(i = 0; i < 8; i++)
-      {
-        if(lua_isnone(L,
index+2*i)) - break; - size->data[i] = luaL_checklong(L, index+2*i); - - if(lua_isnone(L, index+2*i+1)) - break; - stride->data[i] = luaL_checklong(L, index+2*i+1); - } - } - else - { - for(i = 0; i < 8; i++) - { - if(lua_isnone(L, index+i)) - break; - size->data[i] = luaL_checklong(L, index+i); - } - } - } - - *size_ = size; - *stride_ = stride; -} - -static void torch_Tensor_(c_readTensorStorageSizeStride)(lua_State *L, int index, int allowNone, int allowTensor, int allowStorage, int allowStride, - THStorage **storage_, ptrdiff_t *storageOffset_, THLongStorage **size_, THLongStorage **stride_) -{ - THTensor *src = NULL; - THStorage *storage = NULL; - - int arg1Type = lua_type(L, index); - - if( allowNone && (arg1Type == LUA_TNONE) ) - { - *storage_ = NULL; - *storageOffset_ = 0; - *size_ = NULL; - *stride_ = NULL; - return; - } - else if( allowTensor && (arg1Type == LUA_TUSERDATA) && (src = luaT_toudata(L, index, torch_Tensor)) ) - { - *storage_ = src->storage; - *storageOffset_ = src->storageOffset; - *size_ = THTensor_(newSizeOf)(src); - *stride_ = THTensor_(newStrideOf)(src); - return; - } - else if( allowStorage && (arg1Type == LUA_TUSERDATA) && (storage = luaT_toudata(L, index, torch_Storage)) ) - { - *storage_ = storage; - if(lua_isnone(L, index+1)) - { - *storageOffset_ = 0; - *size_ = THLongStorage_newWithSize1(storage->size); - *stride_ = THLongStorage_newWithSize1(1); - } - else - { - *storageOffset_ = luaL_checkinteger(L, index+1)-1; - torch_Tensor_(c_readSizeStride)(L, index+2, allowStride, size_, stride_); - } - return; - } - else if( (arg1Type == LUA_TNUMBER) || (luaT_toudata(L, index, "torch.LongStorage")) ) - { - *storage_ = NULL; - *storageOffset_ = 0; - torch_Tensor_(c_readSizeStride)(L, index, 0, size_, stride_); - - return; - } - - *storage_ = NULL; - *storageOffset_ = 0; - if(allowTensor && allowStorage) - THArgCheck(0, index, "expecting number or " torch_Tensor " or " torch_Storage ); - else if(allowTensor) - THArgCheck(0, index, "expecting number or " torch_Tensor ); - else if(allowStorage) - THArgCheck(0, index, "expecting number or " torch_Storage ); - else - THArgCheck(0, index, "expecting number"); -} - -#ifndef TH_REAL_IS_HALF -static int torch_Tensor_(apply)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - luaL_checktype(L, 2, LUA_TFUNCTION); - lua_settop(L, 2); - - TH_TENSOR_APPLY(real, tensor, - lua_pushvalue(L, 2); - luaG_(pushreal)(L, *tensor_data); - lua_call(L, 1, 1); - if(lua_isnumber(L, 3)) - { - *tensor_data = luaG_(checkreal)(L, 3); - lua_pop(L, 1); - } - else if(lua_isnil(L, 3)) - lua_pop(L, 1); - else - THError("given function should return a number or nil");); - - lua_settop(L, 1); - return 1; -} - -static int torch_Tensor_(map)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THTensor *src = luaT_checkudata(L, 2, torch_Tensor); - luaL_checktype(L, 3, LUA_TFUNCTION); - lua_settop(L, 3); - - TH_TENSOR_APPLY2(real, tensor, real, src, - lua_pushvalue(L, 3); - luaG_(pushreal)(L, *tensor_data); - luaG_(pushreal)(L, *src_data); - lua_call(L, 2, 1); - if(lua_isnumber(L, 4)) - { - *tensor_data = luaG_(checkreal)(L, 4); - lua_pop(L, 1); - } - else if(lua_isnil(L, 4)) - lua_pop(L, 1); - else - THError("given function should return a number or nil");); - - lua_settop(L, 1); - return 1; -} - -static int torch_Tensor_(map2)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THTensor *src1 = luaT_checkudata(L, 2, torch_Tensor); - THTensor *src2 = luaT_checkudata(L, 3, 
torch_Tensor); - luaL_checktype(L, 4, LUA_TFUNCTION); - lua_settop(L, 4); - - TH_TENSOR_APPLY3(real, tensor, real, src1, real, src2, - lua_pushvalue(L, 4); - luaG_(pushreal)(L, *tensor_data); - luaG_(pushreal)(L, *src1_data); - luaG_(pushreal)(L, *src2_data); - lua_call(L, 3, 1); - if(lua_isnumber(L, 5)) - { - *tensor_data = luaG_(checkreal)(L, 5); - lua_pop(L, 1); - } - else if(lua_isnil(L, 5)) - lua_pop(L, 1); - else - THError("given function should return a number or nil");); - - lua_settop(L, 1); - return 1; -} -#endif - -static int torch_Tensor_(factory)(lua_State *L) -{ - THTensor *tensor = THTensor_(new)(); - luaT_pushudata(L, tensor, torch_Tensor); - return 1; -} - -static int torch_Tensor_(write)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THFile *file = luaT_checkudata(L, 2, "torch.File"); - - THFile_writeIntScalar(file, tensor->nDimension); - THFile_writeLongRaw(file, tensor->size, tensor->nDimension); - THFile_writeLongRaw(file, tensor->stride, tensor->nDimension); - THFile_writeLongScalar(file, tensor->storageOffset+1); /* to respect Lua convention */ - - lua_getfield(L, 2, "writeObject"); /* the method */ - lua_pushvalue(L, 2); /* the file */ - /* the storage */ - if(tensor->storage) - { - THStorage_(retain)(tensor->storage); - luaT_pushudata(L, tensor->storage, torch_Storage); - } - else - lua_pushnil(L); - - lua_call(L, 2, 0); /* call the method */ - - return 0; -} - -static int torch_Tensor_(read)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THFile *file = luaT_checkudata(L, 2, "torch.File"); - - tensor->nDimension = THFile_readIntScalar(file); - tensor->size = THAlloc(sizeof(long)*tensor->nDimension); - tensor->stride = THAlloc(sizeof(long)*tensor->nDimension); - THFile_readLongRaw(file, tensor->size, tensor->nDimension); - THFile_readLongRaw(file, tensor->stride, tensor->nDimension); - tensor->storageOffset = THFile_readLongScalar(file); - tensor->storageOffset--; /* to respect Lua convention */ - - lua_getfield(L, 2, "readObject"); /* the method */ - lua_pushvalue(L, 2); /* the file */ - lua_call(L, 1, 1); /* call the method */ - - tensor->storage = luaT_toudata(L, -1, torch_Storage); - if(tensor->storage) - THStorage_(retain)(tensor->storage); - - return 0; -} - -static const struct luaL_Reg torch_Tensor_(_) [] = { - {"retain", torch_Tensor_(retain)}, - {"free", torch_Tensor_(free)}, - {"contiguous", torch_Tensor_(contiguous)}, - {"size", torch_Tensor_(size)}, - {"elementSize", torch_Tensor_(elementSize)}, - {"__len__", torch_Tensor_(size)}, - {"stride", torch_Tensor_(stride)}, - {"dim", torch_Tensor_(nDimension)}, - {"nDimension", torch_Tensor_(nDimension)}, - {"set", torch_Tensor_(set)}, - {"storage", torch_Tensor_(storage)}, - {"storageOffset", torch_Tensor_(storageOffset)}, - {"clone", torch_Tensor_(clone)}, - {"contiguous", torch_Tensor_(contiguous)}, - {"resizeAs", torch_Tensor_(resizeAs)}, - {"resize", torch_Tensor_(resize)}, - {"narrow", torch_Tensor_(narrow)}, - {"sub", torch_Tensor_(sub)}, - {"select", torch_Tensor_(select)}, -#ifndef TH_REAL_IS_HALF - {"index", torch_Tensor_(indexSelect)}, - {"indexCopy", torch_Tensor_(indexCopy)}, - {"indexAdd", torch_Tensor_(indexAdd)}, - {"indexFill", torch_Tensor_(indexFill)}, - {"maskedSelect", torch_Tensor_(maskedSelect)}, - {"maskedCopy", torch_Tensor_(maskedCopy)}, - {"maskedFill", torch_Tensor_(maskedFill)}, -#endif - {"transpose", torch_Tensor_(transpose)}, - {"t", torch_Tensor_(t)}, - {"unfold", torch_Tensor_(unfold)}, - {"isContiguous", 
torch_Tensor_(isContiguous)}, - {"isSameSizeAs", torch_Tensor_(isSameSizeAs)}, - {"isSetTo", torch_Tensor_(isSetTo)}, - {"isSize", torch_Tensor_(isSize)}, - {"nElement", torch_Tensor_(nElement)}, - {"copy", torch_Tensor_(copy)}, -#ifndef TH_REAL_IS_HALF - {"apply", torch_Tensor_(apply)}, - {"map", torch_Tensor_(map)}, - {"map2", torch_Tensor_(map2)}, -#endif - {"read", torch_Tensor_(read)}, - {"write", torch_Tensor_(write)}, - {"__index__", torch_Tensor_(__index__)}, - {"__newindex__", torch_Tensor_(__newindex__)}, - {NULL, NULL} -}; - -void torch_Tensor_(init)(lua_State *L) -{ - luaT_newmetatable(L, torch_Tensor, NULL, - torch_Tensor_(new), torch_Tensor_(free), torch_Tensor_(factory)); - luaT_setfuncs(L, torch_Tensor_(_), 0); - lua_pop(L, 1); -#ifndef TH_REAL_IS_HALF - THVector_(vectorDispatchInit)(); -#endif -} - -#endif diff --git a/contrib/lua-torch/torch7/generic/TensorOperator.c b/contrib/lua-torch/torch7/generic/TensorOperator.c deleted file mode 100644 index 37b2a0889b..0000000000 --- a/contrib/lua-torch/torch7/generic/TensorOperator.c +++ /dev/null @@ -1,193 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/TensorOperator.c" -#else - -#include "luaG.h" - -static int torch_TensorOperator_(__add__)(lua_State *L) -{ - THTensor *tensor1 = luaT_toudata(L, 1, torch_Tensor); - THTensor *tensor2 = luaT_toudata(L, 2, torch_Tensor); - THTensor *r; - - if(!tensor1 && !tensor2) - luaL_error(L, "expecting two " torch_Tensor "s or one " torch_Tensor " and one number"); - else - { - r = THTensor_(new)(); - luaT_pushudata(L, r, torch_Tensor); - - if(!tensor1 && tensor2) - { - THTensor_(resizeAs)(r, tensor2); - THTensor_(copy)(r, tensor2); - THTensor_(add)(r, r, luaG_(checkreal)(L, 1)); - } - else if(tensor1 && !tensor2) - { - THTensor_(resizeAs)(r, tensor1); - THTensor_(copy)(r, tensor1); - THTensor_(add)(r, r, luaG_(checkreal)(L, 2)); - } - else - { - THTensor_(resizeAs)(r, tensor1); - THTensor_(copy)(r, tensor1); - THTensor_(cadd)(r, r, 1, tensor2); - } - } - return 1; -} - -static int torch_TensorOperator_(__sub__)(lua_State *L) -{ - THTensor *tensor1 = luaT_toudata(L, 1, torch_Tensor); - THTensor *tensor2 = luaT_toudata(L, 2, torch_Tensor); - THTensor *r; - - if(!tensor1 && !tensor2) - luaL_error(L, "expecting two " torch_Tensor "s or one " torch_Tensor " and one number"); - else - { - r = THTensor_(new)(); - luaT_pushudata(L, r, torch_Tensor); - - if(!tensor1 && tensor2) - { - THTensor_(resizeAs)(r, tensor2); - THTensor_(fill)(r, luaG_(checkreal)(L, 1)); - THTensor_(cadd)(r, r, -1, tensor2); - } - else if(tensor1 && !tensor2) - { - THTensor_(resizeAs)(r, tensor1); - THTensor_(copy)(r, tensor1); - THTensor_(add)(r, r, -luaG_(checkreal)(L, 2)); - } - else - { - THTensor_(resizeAs)(r, tensor1); - THTensor_(copy)(r, tensor1); - THTensor_(cadd)(r, r, -1, tensor2); - } - } - return 1; -} - -static int torch_TensorOperator_(__unm__)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THTensor *r; - - r = THTensor_(new)(); - luaT_pushudata(L, r, torch_Tensor); - THTensor_(resizeAs)(r, tensor); - THTensor_(copy)(r, tensor); - THTensor_(mul)(r, r, -1); - - return 1; -} - -static int torch_TensorOperator_(__mul__)(lua_State *L) -{ - THTensor *tensor1 = luaT_toudata(L, 1, torch_Tensor); - THTensor *tensor2 = luaT_toudata(L, 2, torch_Tensor); - THTensor *r; - - if(!tensor1 && !tensor2) - luaL_error(L, "expecting two " torch_Tensor "s or one " torch_Tensor " and one number"); - else - { - r = THTensor_(new)(); - luaT_pushudata(L, r, torch_Tensor); - - if(!tensor1 
&& tensor2) - { - THTensor_(resizeAs)(r, tensor2); - THTensor_(copy)(r, tensor2); - THTensor_(mul)(r, r, luaG_(checkreal)(L, 1)); - } - else if(tensor1 && !tensor2) - { - THTensor_(resizeAs)(r, tensor1); - THTensor_(copy)(r, tensor1); - THTensor_(mul)(r, r, luaG_(checkreal)(L, 2)); - } - else - { - int dimt = tensor1->nDimension; - int dims = tensor2->nDimension; - - if(dimt == 1 && dims == 1) - luaG_(pushreal)(L, THTensor_(dot)(tensor1, tensor2)); /* ok, we wasted r, but who cares */ - else if(dimt == 2 && dims == 1) - { - THTensor_(resize1d)(r, tensor1->size[0]); - THTensor_(zero)(r); - THTensor_(addmv)(r, 1, r, 1, tensor1, tensor2); - } - else if(dimt == 2 && dims == 2) - { - THTensor_(resize2d)(r, tensor1->size[0], tensor2->size[1]); - THTensor_(zero)(r); - THTensor_(addmm)(r, 1, r, 1, tensor1, tensor2); - } - else - luaL_error(L, "multiplication between %dD and %dD tensors not yet supported", tensor1->nDimension, tensor2->nDimension); - } - } - return 1; -} - -static int torch_TensorOperator_(__div__)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THTensor *r; - - THArgCheck(lua_isnumber(L,2), 2, "number expected"); - - r = THTensor_(new)(); - luaT_pushudata(L, r, torch_Tensor); - - THTensor_(resizeAs)(r, tensor); - THTensor_(copy)(r, tensor); - THTensor_(div)(r, r, lua_tonumber(L, 2)); - - return 1; -} - -static int torch_TensorOperator_(__mod__)(lua_State *L) -{ - THTensor *tensor = luaT_checkudata(L, 1, torch_Tensor); - THTensor *r; - - THArgCheck(lua_isnumber(L,2), 2, "number expected"); - - r = THTensor_(new)(); - luaT_pushudata(L, r, torch_Tensor); - - THTensor_(resizeAs)(r, tensor); - THTensor_(copy)(r, tensor); - THTensor_(remainder)(r, r, lua_tonumber(L, 2)); - - return 1; -} - -static const struct luaL_Reg torch_TensorOperator_(_) [] = { - {"__add__", torch_TensorOperator_(__add__)}, - {"__sub__", torch_TensorOperator_(__sub__)}, - {"__unm__", torch_TensorOperator_(__unm__)}, - {"__mul__", torch_TensorOperator_(__mul__)}, - {"__div__", torch_TensorOperator_(__div__)}, - {"__mod__", torch_TensorOperator_(__mod__)}, - {NULL, NULL} -}; - -void torch_TensorOperator_(init)(lua_State *L) -{ - luaT_pushmetatable(L, torch_Tensor); - luaT_setfuncs(L, torch_TensorOperator_(_), 0); - lua_pop(L, 1); -} - -#endif diff --git a/contrib/lua-torch/torch7/generic/luaG.h b/contrib/lua-torch/torch7/generic/luaG.h deleted file mode 100644 index f1ffce29e6..0000000000 --- a/contrib/lua-torch/torch7/generic/luaG.h +++ /dev/null @@ -1,62 +0,0 @@ -#if !defined(real) || !defined(TH_GENERIC_FILE) -#error "luaG.h must not be included outside of a generic file." 
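-/* Illustration, assuming real is float (so the Real token is Float): the
- * luaG_() macro defined after this guard expands luaG_(pushreal) to
- * luaG_Floatpushreal, stamping out one copy of these helpers per real type. */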
-#endif - -#ifndef luaG_ -#define luaG_(NAME) TH_CONCAT_3(luaG_,Real,NAME) -#endif - -#undef REAL_TO_LUA_NUMBER -#undef LUA_NUMBER_TO_REAL - -#if defined(TH_REAL_IS_HALF) -# define REAL_TO_LUA_NUMBER(n) (lua_Number)TH_half2float(n) -# define LUA_NUMBER_TO_REAL(n) TH_float2half((lua_Number)n) -#else -# define REAL_TO_LUA_NUMBER(n) (lua_Number)(n) -# define LUA_NUMBER_TO_REAL(n) (real)n -#endif - - - -static void luaG_(pushreal)(lua_State *L, real n) { -#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) || LUA_VERSION_NUM < 503 - lua_pushnumber(L, REAL_TO_LUA_NUMBER(n)); -#elif defined(TH_REAL_IS_BYTE) || defined(TH_REAL_IS_CHAR) || defined(TH_REAL_IS_SHORT) \ - || defined(TH_REAL_IS_INT) || defined(TH_REAL_IS_LONG) - lua_pushinteger(L, (lua_Integer)n); -#else - #error "unhandled real type in luaG_pushreal" -#endif -} - -static real luaG_(checkreal)(lua_State *L, int idx) { -#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) - return LUA_NUMBER_TO_REAL(luaL_checknumber(L, idx)); -#elif defined(TH_REAL_IS_BYTE) || defined(TH_REAL_IS_CHAR) || defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT) || defined(TH_REAL_IS_LONG) - int type = lua_type(L, idx); - if (type == LUA_TSTRING) { - const char *str = lua_tolstring(L, idx, NULL); - long int num = strtol(str, NULL, 0); - return (real) num; - } else { -#if LUA_VERSION_NUM < 503 - return (lua_Number)luaL_checkinteger(L, idx); -#else - return (lua_Integer)luaL_checkinteger(L, idx); -#endif - } -#else - #error "unhandled real type in luaG_checkreal" -#endif -} - -static real luaG_(optreal)(lua_State *L, int idx, real n) { -#if defined(TH_REAL_IS_HALF) || defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || LUA_VERSION_NUM < 503 - return LUA_NUMBER_TO_REAL(luaL_optnumber(L, idx, REAL_TO_LUA_NUMBER(n))); -#elif defined(TH_REAL_IS_BYTE) || defined(TH_REAL_IS_CHAR) || defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT) || defined(TH_REAL_IS_LONG) - return (lua_Integer)luaL_optinteger(L, idx, (lua_Integer)n); -#else - #error "unhandled real type in luaG_checkreal" -#endif -} diff --git a/contrib/lua-torch/torch7/init.c b/contrib/lua-torch/torch7/init.c deleted file mode 100644 index 3bdac176dc..0000000000 --- a/contrib/lua-torch/torch7/init.c +++ /dev/null @@ -1,93 +0,0 @@ -#include "general.h" -#include "utils.h" - -extern void torch_utils_init(lua_State *L); -extern void torch_random_init(lua_State *L); -extern void torch_File_init(lua_State *L); -extern void torch_DiskFile_init(lua_State *L); -extern void torch_MemoryFile_init(lua_State *L); -extern void torch_PipeFile_init(lua_State *L); -extern void torch_Timer_init(lua_State *L); - -extern void torch_ByteStorage_init(lua_State *L); -extern void torch_CharStorage_init(lua_State *L); -extern void torch_ShortStorage_init(lua_State *L); -extern void torch_IntStorage_init(lua_State *L); -extern void torch_LongStorage_init(lua_State *L); -extern void torch_FloatStorage_init(lua_State *L); -extern void torch_DoubleStorage_init(lua_State *L); -extern void torch_HalfStorage_init(lua_State *L); - -extern void torch_ByteTensor_init(lua_State *L); -extern void torch_CharTensor_init(lua_State *L); -extern void torch_ShortTensor_init(lua_State *L); -extern void torch_IntTensor_init(lua_State *L); -extern void torch_LongTensor_init(lua_State *L); -extern void torch_FloatTensor_init(lua_State *L); -extern void torch_DoubleTensor_init(lua_State *L); -extern void torch_HalfTensor_init(lua_State *L); - -extern void 
torch_ByteTensorOperator_init(lua_State *L); -extern void torch_CharTensorOperator_init(lua_State *L); -extern void torch_ShortTensorOperator_init(lua_State *L); -extern void torch_IntTensorOperator_init(lua_State *L); -extern void torch_LongTensorOperator_init(lua_State *L); -extern void torch_FloatTensorOperator_init(lua_State *L); -extern void torch_DoubleTensorOperator_init(lua_State *L); - - -extern void torch_TensorMath_init(lua_State *L); - - -LUA_EXTERNC DLL_EXPORT int luaopen_libtorch(lua_State *L); - -int luaopen_libtorch(lua_State *L) -{ - - lua_newtable(L); - lua_pushvalue(L, -1); - lua_setglobal(L, "torch"); - - torch_utils_init(L); - torch_File_init(L); - - torch_ByteStorage_init(L); - torch_CharStorage_init(L); - torch_ShortStorage_init(L); - torch_IntStorage_init(L); - torch_LongStorage_init(L); - torch_FloatStorage_init(L); - torch_DoubleStorage_init(L); - torch_HalfStorage_init(L); - - torch_ByteTensor_init(L); - torch_CharTensor_init(L); - torch_ShortTensor_init(L); - torch_IntTensor_init(L); - torch_LongTensor_init(L); - torch_FloatTensor_init(L); - torch_DoubleTensor_init(L); - torch_HalfTensor_init(L); - - torch_ByteTensorOperator_init(L); - torch_CharTensorOperator_init(L); - torch_ShortTensorOperator_init(L); - torch_IntTensorOperator_init(L); - torch_LongTensorOperator_init(L); - torch_FloatTensorOperator_init(L); - torch_DoubleTensorOperator_init(L); - - torch_Timer_init(L); - torch_DiskFile_init(L); - torch_PipeFile_init(L); - torch_MemoryFile_init(L); - - torch_TensorMath_init(L); - - torch_random_init(L); - - // Create 'torch.Allocator' type. - luaT_newmetatable(L, "torch.Allocator", NULL, NULL, NULL, NULL); - - return 1; -} diff --git a/contrib/lua-torch/torch7/init.lua b/contrib/lua-torch/torch7/init.lua deleted file mode 100644 index 0f3cfbb1e0..0000000000 --- a/contrib/lua-torch/torch7/init.lua +++ /dev/null @@ -1,192 +0,0 @@ --- We are using paths.require to appease mkl - --- Make this work with LuaJIT in Lua 5.2 compatibility mode, which --- renames string.gfind (already deprecated in 5.1) -if not string.gfind then - string.gfind = string.gmatch -end -if not table.unpack then - table.unpack = unpack -end - -require "paths" -paths.require "libtorch" - --- Keep track of all thread local variables torch. --- if a Lua VM is passed to another thread thread local --- variables need to be updated. -function torch.updatethreadlocals() - torch.updateerrorhandlers() - local tracking = torch._heaptracking - if tracking == nil then tracking = false end - torch.setheaptracking(tracking) -end - ---- package stuff -function torch.packageLuaPath(name) - if not name then - local ret = string.match(torch.packageLuaPath('torch'), '(.*)/') - if not ret then --windows? - ret = string.match(torch.packageLuaPath('torch'), '(.*)\\') - end - return ret - end - for path in string.gmatch(package.path, "[^;]+") do - path = string.gsub(path, "%?", name) - local f = io.open(path) - if f then - f:close() - local ret = string.match(path, "(.*)/") - if not ret then --windows? - ret = string.match(path, "(.*)\\") - end - return ret - end - end -end - -local function include(file, depth) - paths.dofile(file, 3 + (depth or 0)) -end -rawset(_G, 'include', include) - -function torch.include(package, file) - dofile(torch.packageLuaPath(package) .. '/' .. file) -end - -function torch.class(...) - local tname, parenttname, module - if select('#', ...) 
== 3 - and type(select(1, ...)) == 'string' - and type(select(2, ...)) == 'string' - and type(select(3, ...)) == 'table' - then - tname = select(1, ...) - parenttname = select(2, ...) - module = select(3, ...) - elseif select('#', ...) == 2 - and type(select(1, ...)) == 'string' - and type(select(2, ...)) == 'string' - then - tname = select(1, ...) - parenttname = select(2, ...) - elseif select('#', ...) == 2 - and type(select(1, ...)) == 'string' - and type(select(2, ...)) == 'table' - then - tname = select(1, ...) - module = select(2, ...) - elseif select('#', ...) == 1 - and type(select(1, ...)) == 'string' - then - tname = select(1, ...) - else - error(' [] [] expected') - end - - local function constructor(...) - local self = {} - torch.setmetatable(self, tname) - if self.__init then - self:__init(...) - end - return self - end - - local function factory() - local self = {} - torch.setmetatable(self, tname) - return self - end - - local mt = torch.newmetatable(tname, parenttname, constructor, nil, factory, module) - local mpt - if parenttname then - mpt = torch.getmetatable(parenttname) - end - return mt, mpt -end - -function torch.setdefaulttensortype(typename) - assert(type(typename) == 'string', 'string expected') - if torch.getconstructortable(typename) then - torch.Tensor = torch.getconstructortable(typename) - torch.Storage = torch.getconstructortable(torch.typename(torch.Tensor(1):storage())) - else - error(string.format("<%s> is not a string describing a torch object", typename)) - end -end - -function torch.type(obj) - local class = torch.typename(obj) - if not class then - class = type(obj) - end - return class -end - ---[[ See if a given object is an instance of the provided torch class. ]] -function torch.isTypeOf(obj, typeSpec) - -- typeSpec can be provided as either a string, pattern, or the constructor. - -- If the constructor is used, we look in the __typename field of the - -- metatable to find a string to compare to. 
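-   --
-   -- A minimal usage sketch ('Foo' is a made-up class name, for illustration):
-   --   local Foo = torch.class('Foo')
-   --   local obj = Foo()
-   --   torch.isTypeOf(obj, 'Foo')   --> true (full name match)
-   --   torch.isTypeOf(obj, 'Fo.*')  --> true (typeSpec used as a Lua pattern)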
- if type(typeSpec) ~= 'string' then - typeSpec = getmetatable(typeSpec).__typename - assert(type(typeSpec) == 'string', - "type must be provided as [regexp] string, or factory") - end - - local mt = getmetatable(obj) - while mt do - if type(mt) == 'table' and mt.__typename then - local match = mt.__typename:match(typeSpec) - -- Require full match for non-pattern specs - if match and (match ~= typeSpec or match == mt.__typename) then - return true - end - end - mt = getmetatable(mt) - end - return false -end - -torch.setdefaulttensortype('torch.DoubleTensor') - -require('torch.Tensor') -require('torch.File') -require('torch.CmdLine') -require('torch.FFInterface') -require('torch.Tester') -require('torch.TestSuite') -require('torch.test') - -function torch.totable(obj) - if torch.isTensor(obj) or torch.isStorage(obj) then - return obj:totable() - else - error("obj must be a Storage or a Tensor") - end -end - -function torch.isTensor(obj) - local typename = torch.typename(obj) - if typename and typename:find('torch.*Tensor') then - return true - end - return false -end - -function torch.isStorage(obj) - local typename = torch.typename(obj) - if typename and typename:find('torch.*Storage') then - return true - end - return false -end --- alias for convenience -torch.Tensor.isTensor = torch.isTensor - --- remove this line to disable automatic heap-tracking for garbage collection -torch.setheaptracking(true) - -return torch diff --git a/contrib/lua-torch/torch7/lib/CMakeLists.txt b/contrib/lua-torch/torch7/lib/CMakeLists.txt deleted file mode 100644 index d6a0e2c9c0..0000000000 --- a/contrib/lua-torch/torch7/lib/CMakeLists.txt +++ /dev/null @@ -1,7 +0,0 @@ -SET(TH_INSTALL_BIN_SUBDIR "${BINDIR}") -SET(TH_INSTALL_LIB_SUBDIR "${RSPAMD_LIBDIR}") -SET(TH_INSTALL_INCLUDE_SUBDIR "${Torch_INSTALL_INCLUDE_SUBDIR}") -SET(TH_INSTALL_CMAKE_SUBDIR "${Torch_INSTALL_CMAKE_SUBDIR}") - -ADD_SUBDIRECTORY(TH) -ADD_SUBDIRECTORY(luaT) diff --git a/contrib/lua-torch/torch7/lib/TH/CMakeLists.txt b/contrib/lua-torch/torch7/lib/TH/CMakeLists.txt deleted file mode 100644 index f7e0bf9bb3..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/CMakeLists.txt +++ /dev/null @@ -1,296 +0,0 @@ -cmake_minimum_required(VERSION 2.6) - -# avoid some cmake warnings - -LIST(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") -SET(CMAKE_LIBRARY_PATH /usr/lib/x86_64-linux-gnu/ ${CMAKE_LIBRARY_PATH}) - -####################################################################### -##### flags section -###################################################################### - -IF(MSVC) - # MSVC now supports C99 since VS2013/VS2015, however the standard version switch is not provided yet - # SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /std:c99") -ELSE(MSVC) - # enable gnu99 and not c99 because we use - # gnu extensions like posix_memalign - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") -ENDIF(MSVC) - -IF(MSVC) - ADD_DEFINITIONS(-D_CRT_SECURE_NO_DEPRECATE=1) # respect the standard -ENDIF(MSVC) -SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w") -IF(UNIX) - # prevent Unknown CMake command "check_function_exists". - INCLUDE(CheckFunctionExists) -ENDIF(UNIX) - -# OpenMP support? 
- -IF (WITH_OPENMP) - FIND_PACKAGE(OpenMP) - IF(OPENMP_FOUND) - MESSAGE(STATUS "Compiling with OpenMP support") - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") - SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}") - ENDIF(OPENMP_FOUND) -ENDIF (WITH_OPENMP) - -# ARM specific flags -FIND_PACKAGE(ARM) -IF (ASIMD_FOUND) - MESSAGE(STATUS "asimd/Neon found with compiler flag : -D__NEON__") - SET(CMAKE_C_FLAGS "-D__NEON__ ${CMAKE_C_FLAGS}") -ELSEIF (NEON_FOUND) - MESSAGE(STATUS "Neon found with compiler flag : -mfpu=neon -D__NEON__") - SET(CMAKE_C_FLAGS "-mfpu=neon -D__NEON__ ${CMAKE_C_FLAGS}") -ENDIF (ASIMD_FOUND) -IF (CORTEXA8_FOUND) - MESSAGE(STATUS "Cortex-A8 Found with compiler flag : -mcpu=cortex-a8") - SET(CMAKE_C_FLAGS "-mcpu=cortex-a8 -fprefetch-loop-arrays ${CMAKE_C_FLAGS}") -ENDIF (CORTEXA8_FOUND) -IF (CORTEXA9_FOUND) - MESSAGE(STATUS "Cortex-A9 Found with compiler flag : -mcpu=cortex-a9") - SET(CMAKE_C_FLAGS "-mcpu=cortex-a9 ${CMAKE_C_FLAGS}") -ENDIF (CORTEXA9_FOUND) - -INCLUDE (CheckIncludeFile) -INCLUDE (CheckCSourceCompiles) -CHECK_INCLUDE_FILE(cpuid.h HAVE_CPUID_H) -# Check for a cpuid intrinsic -IF(HAVE_CPUID_H) - CHECK_C_SOURCE_COMPILES("#include - int main() - { - unsigned int eax, ebx, ecx, edx; - return __get_cpuid(0, &eax, &ebx, &ecx, &edx); - }" HAVE_GCC_GET_CPUID) -ENDIF() -IF(HAVE_GCC_GET_CPUID) - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DHAVE_GCC_GET_CPUID") -ENDIF(HAVE_GCC_GET_CPUID) - -CHECK_C_SOURCE_COMPILES("#include - static inline void cpuid(uint32_t *eax, uint32_t *ebx, - uint32_t *ecx, uint32_t *edx) - { - uint32_t a = *eax, b, c = *ecx, d; - asm volatile ( \"cpuid\" : \"+a\"(a), \"=b\"(b), \"+c\"(c), \"=d\"(d) ); - *eax = a; *ebx = b; *ecx = c; *edx = d; - } - int main() { - uint32_t a,b,c,d; - cpuid(&a, &b, &c, &d); - return 0; - }" NO_GCC_EBX_FPIC_BUG) - -IF(NOT NO_GCC_EBX_FPIC_BUG) - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DUSE_GCC_GET_CPUID") -ENDIF(NOT NO_GCC_EBX_FPIC_BUG) - - -FIND_PACKAGE(SSE) # checks SSE, AVX and AVX2 -IF(C_SSE2_FOUND) - MESSAGE(STATUS "SSE2 Found") - SET(CMAKE_C_FLAGS "${C_SSE2_FLAGS} -DUSE_SSE2 ${CMAKE_C_FLAGS}") -ENDIF(C_SSE2_FOUND) -IF(C_SSE3_FOUND) - MESSAGE(STATUS "SSE3 Found") - SET(CMAKE_C_FLAGS "${C_SSE3_FLAGS} -DUSE_SSE3 ${CMAKE_C_FLAGS}") -ENDIF(C_SSE3_FOUND) -# we dont set -mavx and -mavx2 flags globally, but only for specific files -# however, we want to enable the AVX codepaths, so we still need to -# add USE_AVX and USE_AVX2 macro defines -IF(FALSE) -IF(C_AVX_FOUND) - MESSAGE(STATUS "AVX Found") - SET(CMAKE_C_FLAGS "-DUSE_AVX ${CMAKE_C_FLAGS}") -ENDIF(C_AVX_FOUND) -IF(C_AVX2_FOUND) - MESSAGE(STATUS "AVX2 Found") - SET(CMAKE_C_FLAGS "-DUSE_AVX2 ${CMAKE_C_FLAGS}") -ENDIF(C_AVX2_FOUND) -ENDIF() - -CHECK_C_SOURCE_RUNS(" -#include -int main() -{ - int a; - int oa; - atomic_store(&a, 1); - atomic_fetch_add(&a, 1); - oa = atomic_load(&a); - if(!atomic_compare_exchange_strong(&a, &oa, 3)) - return -1; - return 0; -} -" HAS_C11_ATOMICS) - -IF(NOT HAS_C11_ATOMICS) - CHECK_C_SOURCE_RUNS(" -#include -int main() -{ - long a; - _InterlockedExchange(&a, 1); - _InterlockedExchangeAdd(&a, 1); - if(_InterlockedCompareExchange(&a, 3, 2) != 2) - return -1; - return 0; -} -" HAS_MSC_ATOMICS) - - CHECK_C_SOURCE_RUNS(" -int main() -{ - int a; - __sync_lock_test_and_set(&a, 1); - __sync_fetch_and_add(&a, 1); - if(!__sync_bool_compare_and_swap(&a, 2, 3)) - return -1; - return 0; -} -" HAS_GCC_ATOMICS) -ENDIF() - 
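-# The HAS_*_ATOMICS results above are consumed in the build section below,
-# which turns them into the corresponding USE_*_ATOMICS define; THAtomic.c
-# otherwise falls back to a pthread mutex or, as a last resort, to plain
-# (thread-unsafe) loads and stores.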
-####################################################################### -##### sources section -###################################################################### - -# IF ANY SIMD FOUND -IF ("${ARCH}" STREQUAL "x86_64") - SET(simd generic/simd/convolve.c generic/simd/convolve5x5_sse.c) - SET(CMAKE_C_FLAGS "-DUSE_SSE2 ${CMAKE_C_FLAGS}") - SET_SOURCE_FILES_PROPERTIES(generic/simd/convolve5x5_sse.c PROPERTIES COMPILE_FLAGS "-O3 -ffast-math") -ENDIF() - - -# IF AVX FOUND -IF(FALSE) -IF(C_AVX_FOUND) - IF(MSVC) - SET_SOURCE_FILES_PROPERTIES(generic/simd/convolve5x5_avx.c PROPERTIES COMPILE_FLAGS "/Ox /fp:fast ${C_AVX_FLAGS}") - SET_SOURCE_FILES_PROPERTIES(vector/AVX.c PROPERTIES COMPILE_FLAGS "/Ox /arch:AVX ${C_AVX_FLAGS}") - ELSE(MSVC) - SET_SOURCE_FILES_PROPERTIES(generic/simd/convolve5x5_avx.c PROPERTIES COMPILE_FLAGS "-O3 -ffast-math ${C_AVX_FLAGS}") - SET_SOURCE_FILES_PROPERTIES(vector/AVX.c PROPERTIES COMPILE_FLAGS "-O3 ${C_AVX_FLAGS}") - ENDIF(MSVC) - SET(simd ${simd} vector/AVX.c generic/simd/convolve5x5_avx.c) -ENDIF(C_AVX_FOUND) - -IF(C_AVX2_FOUND) - IF(MSVC) - SET_SOURCE_FILES_PROPERTIES(vector/AVX2.c PROPERTIES COMPILE_FLAGS "/Ox /arch:AVX2 ${C_AVX2_FLAGS}") - ELSE(MSVC) - SET_SOURCE_FILES_PROPERTIES(vector/AVX2.c PROPERTIES COMPILE_FLAGS "-O3 ${C_AVX2_FLAGS}") - ENDIF(MSVC) - SET(simd ${simd} vector/AVX2.c) -ENDIF(C_AVX2_FOUND) -ENDIF() - -SET(hdr - THGeneral.h THHalf.h THAllocator.h THSize.h THStorage.h THTensor.h THTensorApply.h THBlas.h THMath.h - THLapack.h THLogAdd.h THRandom.h THVector.h THAtomic.h ) - -SET(src - THGeneral.c THHalf.c THAllocator.c THSize.c THStorage.c THTensor.c THBlas.c THLapack.c - THLogAdd.c THRandom.c THFile.c THDiskFile.c THMemoryFile.c THAtomic.c THVector.c) - -SET(src ${src} ${hdr} ${simd}) - -####################################################################### -##### build section -###################################################################### - -ADD_TORCH_LIBRARY(TH SHARED "${src}") - -IF(HAS_C11_ATOMICS) - ADD_DEFINITIONS(-DUSE_C11_ATOMICS=1) - MESSAGE(STATUS "Atomics: using C11 intrinsics") -ELSEIF(HAS_MSC_ATOMICS) - ADD_DEFINITIONS(-DUSE_MSC_ATOMICS=1) - MESSAGE(STATUS "Atomics: using MSVC intrinsics") -ELSEIF(HAS_GCC_ATOMICS) - ADD_DEFINITIONS(-DUSE_GCC_ATOMICS=1) - MESSAGE(STATUS "Atomics: using GCC intrinsics") -ELSE() - SET(CMAKE_THREAD_PREFER_PTHREAD TRUE) - FIND_PACKAGE(Threads) - IF(THREADS_FOUND) - ADD_DEFINITIONS(-DUSE_PTHREAD_ATOMICS=1) - TARGET_LINK_LIBRARIES(TH ${CMAKE_THREAD_LIBS_INIT}) - MESSAGE(STATUS "Atomics: using pthread") - ENDIF() -ENDIF() - -FIND_PACKAGE(BLAS) -IF(BLAS_FOUND) - SET(USE_BLAS 1) - TARGET_LINK_LIBRARIES(TH ${BLAS_LIBRARIES}) - IF(BLAS_INFO STREQUAL "mkl") - ADD_DEFINITIONS(-DTH_BLAS_MKL) - ELSEIF(BLAS_INFO STREQUAL "open") - ADD_DEFINITIONS(-DTH_BLAS_OPEN) - ENDIF() -ENDIF(BLAS_FOUND) - -FIND_PACKAGE(LAPACK) -IF(LAPACK_FOUND) - SET(USE_LAPACK 1) - TARGET_LINK_LIBRARIES(TH ${LAPACK_LIBRARIES}) -ENDIF(LAPACK_FOUND) - -IF (UNIX AND NOT APPLE) - INCLUDE(CheckLibraryExists) - # https://github.com/libgit2/libgit2/issues/2128#issuecomment-35649830 - CHECK_LIBRARY_EXISTS(rt clock_gettime "time.h" NEED_LIBRT) - IF(NEED_LIBRT) - TARGET_LINK_LIBRARIES(TH rt) - SET(CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} rt) - ENDIF(NEED_LIBRT) -ENDIF(UNIX AND NOT APPLE) - -IF(UNIX) - SET(CMAKE_EXTRA_INCLUDE_FILES "sys/mman.h") - CHECK_FUNCTION_EXISTS(mmap HAVE_MMAP) - IF(HAVE_MMAP) - ADD_DEFINITIONS(-DHAVE_MMAP=1) - ENDIF(HAVE_MMAP) - # done for lseek: 
https://www.gnu.org/software/libc/manual/html_node/File-Position-Primitive.html - ADD_DEFINITIONS(-D_FILE_OFFSET_BITS=64) - CHECK_FUNCTION_EXISTS(shm_open HAVE_SHM_OPEN) - IF(HAVE_SHM_OPEN) - ADD_DEFINITIONS(-DHAVE_SHM_OPEN=1) - ENDIF(HAVE_SHM_OPEN) - CHECK_FUNCTION_EXISTS(shm_unlink HAVE_SHM_UNLINK) - IF(HAVE_SHM_UNLINK) - ADD_DEFINITIONS(-DHAVE_SHM_UNLINK=1) - ENDIF(HAVE_SHM_UNLINK) - CHECK_FUNCTION_EXISTS(malloc_usable_size HAVE_MALLOC_USABLE_SIZE) - IF(HAVE_MALLOC_USABLE_SIZE) - ADD_DEFINITIONS(-DHAVE_MALLOC_USABLE_SIZE=1) - ENDIF(HAVE_MALLOC_USABLE_SIZE) -ENDIF(UNIX) - -IF(NOT MSVC) - TARGET_LINK_LIBRARIES(TH m) -ENDIF(NOT MSVC) - -# Is __thread supported? -IF(NOT MSVC) - CHECK_C_SOURCE_COMPILES("static __thread int x = 1; int main() { return x; }" C_HAS_THREAD) -ELSE(NOT MSVC) - CHECK_C_SOURCE_COMPILES("static __declspec( thread ) int x = 1; int main() { return x; }" C_HAS_THREAD) -ENDIF(NOT MSVC) -IF(NOT C_HAS_THREAD) - MESSAGE(STATUS "Warning: __thread is not supported, generating thread-unsafe code") -ELSE(NOT C_HAS_THREAD) - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DTH_HAVE_THREAD") -ENDIF(NOT C_HAS_THREAD) - -INCLUDE_DIRECTORIES("${CMAKE_CURRENT_BINARY_DIR}") -CONFIGURE_FILE(THGeneral.h.in "${CMAKE_CURRENT_BINARY_DIR}/THGeneral.h") diff --git a/contrib/lua-torch/torch7/lib/TH/README.md b/contrib/lua-torch/torch7/lib/TH/README.md deleted file mode 100644 index 4ac26c1032..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Environment variables control the disabling of certain explicit SIMD optimizations. - -``` -x64 options: -TH_NO_AVX2=1 # disable AVX2 codepaths -TH_NO_AVX=1 # disable AVX codepaths -TH_NO_SSE=1 # disable SSE codepaths - -ppc64le options: -TH_NO_VSX=1 # disable VSX codepaths -``` diff --git a/contrib/lua-torch/torch7/lib/TH/TH.h b/contrib/lua-torch/torch7/lib/TH/TH.h deleted file mode 100644 index 11f208c4b1..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/TH.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef TH_INC -#define TH_INC - -#include "THGeneral.h" - -#include "THBlas.h" -#ifdef USE_LAPACK -#include "THLapack.h" -#endif - -#include "THAtomic.h" -#include "THVector.h" -#include "THLogAdd.h" -#include "THRandom.h" -#include "THSize.h" -#include "THStorage.h" -#include "THTensor.h" -#include "THTensorApply.h" -#include "THTensorDimApply.h" - -#include "THFile.h" -#include "THDiskFile.h" -#include "THMemoryFile.h" - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THAllocator.c b/contrib/lua-torch/torch7/lib/TH/THAllocator.c deleted file mode 100644 index 51ac69b943..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THAllocator.c +++ /dev/null @@ -1,500 +0,0 @@ -#include "THAllocator.h" -#include "THAtomic.h" - -/* stuff for mapped files */ -#ifdef _WIN32 -#include -#endif - -#if HAVE_MMAP -#include -#include -#include -#include -#include -#endif -/* end of stuff for mapped files */ - -static void *THDefaultAllocator_alloc(void* ctx, ptrdiff_t size) { - return THAlloc(size); -} - -static void *THDefaultAllocator_realloc(void* ctx, void* ptr, ptrdiff_t size) { - return THRealloc(ptr, size); -} - -static void THDefaultAllocator_free(void* ctx, void* ptr) { - THFree(ptr); -} - -THAllocator THDefaultAllocator = { - &THDefaultAllocator_alloc, - &THDefaultAllocator_realloc, - &THDefaultAllocator_free -}; - -#if defined(_WIN32) || defined(HAVE_MMAP) - -struct THMapAllocatorContext_ { - char *filename; /* file name */ - int flags; - ptrdiff_t size; /* mapped size */ - int fd; -}; - -#define TH_ALLOC_ALIGNMENT 64 - -typedef struct { - 
int refcount; -} THMapInfo; - -char * unknown_filename = "filename not specified"; - -THMapAllocatorContext *THMapAllocatorContext_new(const char *filename, int flags) -{ - THMapAllocatorContext *ctx = THAlloc(sizeof(THMapAllocatorContext)); - - if (!(flags & TH_ALLOCATOR_MAPPED_SHARED) && !(flags & TH_ALLOCATOR_MAPPED_SHAREDMEM)) - flags &= ~TH_ALLOCATOR_MAPPED_NOCREATE; - if ((flags ^ TH_ALLOCATOR_MAPPED_EXCLUSIVE) == 0) - THError("TH_ALLOCATOR_MAPPED_EXCLUSIVE flag requires opening the file " - "in shared mode"); - - if (filename) { - ctx->filename = THAlloc(strlen(filename)+1); - strcpy(ctx->filename, filename); - } else { - ctx->filename = unknown_filename; - } - ctx->flags = flags; - ctx->size = 0; - ctx->fd = -1; - - return ctx; -} - -THMapAllocatorContext *THMapAllocatorContext_newWithFd(const char *filename, int fd, int flags) -{ - THMapAllocatorContext *ctx = THMapAllocatorContext_new(filename, flags); - ctx->fd = fd; - - return ctx; -} - -char * THMapAllocatorContext_filename(THMapAllocatorContext *ctx) -{ - return ctx->filename; -} - -int THMapAllocatorContext_fd(THMapAllocatorContext *ctx) -{ - return ctx->fd; -} - -ptrdiff_t THMapAllocatorContext_size(THMapAllocatorContext *ctx) -{ - return ctx->size; -} - -void THMapAllocatorContext_free(THMapAllocatorContext *ctx) -{ - if (ctx->filename != unknown_filename) - THFree(ctx->filename); - THFree(ctx); -} - -static void *_map_alloc(void* ctx_, ptrdiff_t size) -{ - THMapAllocatorContext *ctx = ctx_; - void *data = NULL; - -#ifdef _WIN32 - { - HANDLE hfile; - HANDLE hmfile; - LARGE_INTEGER hfilesz; - - if (ctx->flags & TH_ALLOCATOR_MAPPED_EXCLUSIVE) - THError("exclusive file mapping is not supported on Windows"); - if (ctx->flags & TH_ALLOCATOR_MAPPED_NOCREATE) - THError("file mapping without creation is not supported on Windows"); - if (ctx->flags & TH_ALLOCATOR_MAPPED_KEEPFD) - THError("TH_ALLOCATOR_MAPPED_KEEPFD not supported on Windows"); - if (ctx->flags & TH_ALLOCATOR_MAPPED_FROMFD) - THError("TH_ALLOCATOR_MAPPED_FROMFD not supported on Windows"); - - /* open file */ - /* FILE_FLAG_RANDOM_ACCESS ? 
*/ - if(ctx->flags) - { - hfile = CreateFileA(ctx->filename, GENERIC_READ|GENERIC_WRITE, FILE_SHARE_WRITE|FILE_SHARE_READ, 0, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0); - if (hfile == INVALID_HANDLE_VALUE) - THError("could not open file <%s> in read-write mode; error code: <%d>", ctx->filename, GetLastError()); - } - else - { - hfile = CreateFileA(ctx->filename, GENERIC_READ, FILE_SHARE_WRITE|FILE_SHARE_READ, 0, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0); - if (hfile == INVALID_HANDLE_VALUE) - THError("could not open file <%s> in read-only mode; error code: <%d>", ctx->filename, GetLastError()); - } - - if (GetFileSizeEx(hfile, &hfilesz) == 0) - { - THError("could not get file size: <%s>; error code: <%d>", ctx->filename, GetLastError()); - } - - if(size > 0) - { - if(size > hfilesz.QuadPart) - { - if(ctx->flags) - { - hfilesz.QuadPart = size; - if(SetFilePointerEx(hfile, hfilesz, NULL, FILE_BEGIN) == 0) - { - CloseHandle(hfile); - THError("unable to stretch file <%s> to the right size; error code: <%d>", ctx->filename, GetLastError()); - } - if(SetEndOfFile(hfile) == 0) - { - CloseHandle(hfile); - THError("unable to write to file <%s>; error code: <%d>", ctx->filename, GetLastError()); - } - } - else - { - CloseHandle(hfile); - THError("file <%s> size is smaller than the required mapping size <%ld>; error code: <%d>", ctx->filename, size, GetLastError()); - } - } - } - else - size = hfilesz.QuadPart; - - ctx->size = size; /* if we are here, it must be the right size */ - - hfilesz.QuadPart = ctx->size; - - /* get map handle */ - if(ctx->flags) - { - if( (hmfile = CreateFileMapping(hfile, NULL, PAGE_READWRITE, hfilesz.HighPart, hfilesz.LowPart, NULL)) == NULL ) - THError("could not create a map on file <%s>; error code: <%d>", ctx->filename, GetLastError()); - } - else - { - if( (hmfile = CreateFileMapping(hfile, NULL, PAGE_WRITECOPY, hfilesz.HighPart, hfilesz.LowPart, NULL)) == NULL ) - THError("could not create a map on file <%s>; error code: <%d>", ctx->filename, GetLastError()); - } - - /* map the stuff */ - if(ctx->flags) - data = MapViewOfFile(hmfile, FILE_MAP_ALL_ACCESS, 0, 0, 0); - else - data = MapViewOfFile(hmfile, FILE_MAP_COPY, 0, 0, 0); - - CloseHandle(hfile); - CloseHandle(hmfile); - } -#else /* _WIN32 */ - { - /* open file */ - int fd; - int flags; - struct stat file_stat; - - if (ctx->flags & (TH_ALLOCATOR_MAPPED_SHARED | TH_ALLOCATOR_MAPPED_SHAREDMEM)) - flags = O_RDWR | O_CREAT; - else - flags = O_RDONLY; - - if (ctx->flags & TH_ALLOCATOR_MAPPED_EXCLUSIVE) - flags |= O_EXCL; - if (ctx->flags & TH_ALLOCATOR_MAPPED_NOCREATE) - flags &= ~O_CREAT; - - if (!(ctx->flags & TH_ALLOCATOR_MAPPED_FROMFD)) { - if(ctx->flags & TH_ALLOCATOR_MAPPED_SHARED) - { - if((fd = open(ctx->filename, flags, (mode_t)0600)) == -1) - THError("unable to open file <%s> in read-write mode", ctx->filename); - } - else if (ctx->flags & TH_ALLOCATOR_MAPPED_SHAREDMEM) - { -#ifdef HAVE_SHM_OPEN - if((fd = shm_open(ctx->filename, flags, (mode_t)0600)) == -1) - THError("unable to open shared memory object <%s> in read-write mode", ctx->filename); -#else - THError("unable to open file <%s> in sharedmem mode, shm_open unavailable on this platform", ctx->filename); -#endif - } - else - { - if((fd = open(ctx->filename, O_RDONLY)) == -1) - THError("unable to open file <%s> in read-only mode", ctx->filename); - } - } else { - fd = ctx->fd; - } - - if(fstat(fd, &file_stat) == -1) - { - if (!(ctx->flags & TH_ALLOCATOR_MAPPED_FROMFD)) - close(fd); - THError("unable to stat the file <%s>", ctx->filename); - } - - if(size > 
0)
-    {
-      if(size > file_stat.st_size)
-      {
-        if(ctx->flags)
-        {
-          if(ftruncate(fd, size) == -1)
-            THError("unable to resize file <%s> to the right size", ctx->filename);
-          if(fstat(fd, &file_stat) == -1 || file_stat.st_size < size)
-          {
-            close(fd);
-            THError("unable to stretch file <%s> to the right size", ctx->filename);
-          }
-/* on OS X write returns with errno 45 (Operation not supported) when used
- * with a file descriptor obtained via shm_open
- */
-#ifndef __APPLE__
-          if((write(fd, "", 1)) != 1) /* note that the string "" contains the '\0' byte ... */
-          {
-            close(fd);
-            THError("unable to write to file <%s>", ctx->filename);
-          }
-#endif
-        }
-        else
-        {
-          close(fd);
-          THError("file <%s> size is smaller than the required mapping size <%ld>", ctx->filename, size);
-        }
-      }
-    }
-    else
-      size = file_stat.st_size;
-
-    ctx->size = size; /* if we are here, it must be the right size */
-
-    /* map it */
-    if (ctx->flags & (TH_ALLOCATOR_MAPPED_SHARED | TH_ALLOCATOR_MAPPED_SHAREDMEM))
-      data = mmap(NULL, ctx->size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
-    else
-      data = mmap(NULL, ctx->size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
-
-    if (ctx->flags & TH_ALLOCATOR_MAPPED_KEEPFD) {
-      ctx->fd = fd;
-    } else {
-      if(close(fd) == -1)
-        THError("Error closing file <%s>", ctx->filename);
-      ctx->fd = -1;
-    }
-
-    if (ctx->flags & TH_ALLOCATOR_MAPPED_UNLINK) {
-      if (ctx->flags & TH_ALLOCATOR_MAPPED_SHAREDMEM)
-      {
-#ifdef HAVE_SHM_UNLINK
-        if (shm_unlink(ctx->filename) == -1)
-          THError("could not unlink the shared memory file %s", ctx->filename);
-#else
-        THError("could not unlink the shared memory file %s, shm_unlink not available on platform", ctx->filename);
-#endif
-      }
-      else
-      {
-        if (unlink(ctx->filename) == -1)
-          THError("could not unlink file %s", ctx->filename);
-      }
-    }
-
-    if(data == MAP_FAILED)
-    {
-      data = NULL; /* let's be sure it is NULL */
-      THError("$ Torch: unable to mmap memory: you tried to mmap %dGB.", ctx->size/1073741824);
-    }
-  }
-#endif
-
-  return data;
-}
-
-static void * THMapAllocator_alloc(void *ctx, ptrdiff_t size) {
-  return _map_alloc(ctx, size);
-}
-
-static void *THMapAllocator_realloc(void* ctx, void* ptr, ptrdiff_t size) {
-  THError("cannot realloc mapped data");
-  return NULL;
-}
-
-static void THMapAllocator_free(void* ctx_, void* data) {
-  THMapAllocatorContext *ctx = ctx_;
-
-#ifdef _WIN32
-  if(UnmapViewOfFile(data) == 0)
-    THError("could not unmap the shared memory file");
-#else /* _WIN32 */
-  if (ctx->flags & TH_ALLOCATOR_MAPPED_KEEPFD) {
-    if (close(ctx->fd) == -1)
-      THError("could not close file descriptor %d", ctx->fd);
-  }
-
-  if (munmap(data, ctx->size))
-    THError("could not unmap the shared memory file");
-
-  if (!(ctx->flags & (TH_ALLOCATOR_MAPPED_FROMFD | TH_ALLOCATOR_MAPPED_UNLINK)))
-  {
-    if (ctx->flags & TH_ALLOCATOR_MAPPED_SHAREDMEM)
-    {
-#ifdef HAVE_SHM_UNLINK
-      if (shm_unlink(ctx->filename) == -1)
-        THError("could not unlink the shared memory file %s", ctx->filename);
-#else
-      THError("could not unlink the shared memory file %s, shm_unlink not available on platform", ctx->filename);
-#endif
-    }
-  }
-#endif /* _WIN32 */
-
-  THMapAllocatorContext_free(ctx);
-}
-
-#else
-
-THMapAllocatorContext *THMapAllocatorContext_new(const char *filename, int flags) {
-  THError("file mapping not supported on your system");
-  return NULL;
-}
-
-void THMapAllocatorContext_free(THMapAllocatorContext *ctx) {
-  THError("file mapping not supported on your system");
-}
-
-static void *THMapAllocator_alloc(void* ctx_, ptrdiff_t size) {
-  THError("file mapping not
supported on your system"); - return NULL; -} - -static void *THMapAllocator_realloc(void* ctx, void* ptr, ptrdiff_t size) { - THError("file mapping not supported on your system"); - return NULL; -} - -static void THMapAllocator_free(void* ctx, void* data) { - THError("file mapping not supported on your system"); -} - -#endif - -#if (defined(_WIN32) || defined(HAVE_MMAP)) && defined(TH_ATOMIC_IPC_REFCOUNT) - -static void * THRefcountedMapAllocator_alloc(void *_ctx, ptrdiff_t size) { - THMapAllocatorContext *ctx = _ctx; - - if (ctx->flags & TH_ALLOCATOR_MAPPED_FROMFD) - THError("THRefcountedMapAllocator doesn't support TH_ALLOCATOR_MAPPED_FROMFD flag"); - if (ctx->flags & TH_ALLOCATOR_MAPPED_KEEPFD) - THError("THRefcountedMapAllocator doesn't support TH_ALLOCATOR_MAPPED_KEEPFD flag"); - if (ctx->flags & TH_ALLOCATOR_MAPPED_UNLINK) - THError("THRefcountedMapAllocator doesn't support TH_ALLOCATOR_MAPPED_UNLINK flag"); - if (!(ctx->flags & TH_ALLOCATOR_MAPPED_SHAREDMEM)) - THError("THRefcountedMapAllocator requires TH_ALLOCATOR_MAPPED_SHAREDMEM flag"); - - size = size + TH_ALLOC_ALIGNMENT; - void *ptr = _map_alloc(ctx, size); - char *data = ((char*)ptr) + TH_ALLOC_ALIGNMENT; - THMapInfo *map_info = (THMapInfo*)ptr; - - if (ctx->flags & TH_ALLOCATOR_MAPPED_EXCLUSIVE) - map_info->refcount = 1; - else - THAtomicIncrementRef(&map_info->refcount); - - return (void*)data; -} - -static void *THRefcountedMapAllocator_realloc(void* ctx, void* ptr, ptrdiff_t size) { - THError("cannot realloc mapped data"); - return NULL; -} - -static void THRefcountedMapAllocator_free(void* ctx_, void* data) { - THMapAllocatorContext *ctx = ctx_; - -#ifdef _WIN32 - if(UnmapViewOfFile(data) == 0) - THError("could not unmap the shared memory file"); -#else /* _WIN32 */ - - THMapInfo *info = (THMapInfo*)(((char*)data) - TH_ALLOC_ALIGNMENT); - if (THAtomicDecrementRef(&info->refcount)) { -#ifdef HAVE_SHM_UNLINK - if (shm_unlink(ctx->filename) == -1) - THError("could not unlink the shared memory file %s", ctx->filename); -#else - THError("could not unlink the shared memory file %s, shm_unlink not available on platform", ctx->filename); -#endif /* HAVE_SHM_UNLINK */ - } - if (munmap(info, ctx->size)) - THError("could not unmap the shared memory file %s", ctx->filename); -#endif /* _WIN32 */ - - THMapAllocatorContext_free(ctx); -} - -void THRefcountedMapAllocator_incref(THMapAllocatorContext *ctx, void *data) -{ - THMapInfo *map_info = (THMapInfo*)(((char*)data) - TH_ALLOC_ALIGNMENT); - THAtomicIncrementRef(&map_info->refcount); -} - -int THRefcountedMapAllocator_decref(THMapAllocatorContext *ctx, void *data) -{ - THMapInfo *map_info = (THMapInfo*)(((char*)data) - TH_ALLOC_ALIGNMENT); - return THAtomicDecrementRef(&map_info->refcount); -} - -#else - -static void * THRefcountedMapAllocator_alloc(void *ctx, ptrdiff_t size) { - THError("refcounted file mapping not supported on your system"); - return NULL; -} - -static void *THRefcountedMapAllocator_realloc(void* ctx, void* ptr, ptrdiff_t size) { - THError("refcounted file mapping not supported on your system"); - return NULL; -} - -static void THRefcountedMapAllocator_free(void* ctx_, void* data) { - THError("refcounted file mapping not supported on your system"); -} - -void THRefcountedMapAllocator_incref(THMapAllocatorContext *ctx, void *data) -{ - THError("refcounted file mapping not supported on your system"); -} - -int THRefcountedMapAllocator_decref(THMapAllocatorContext *ctx, void *data) -{ - THError("refcounted file mapping not supported on your system"); - return 0; -} 
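-
-/* Usage sketch for THMapAllocator (hypothetical file name and calls, for
- * illustration only):
- *
- *   THMapAllocatorContext *ctx =
- *     THMapAllocatorContext_new("/tmp/data.bin", TH_ALLOCATOR_MAPPED_SHARED);
- *   void *p = THMapAllocator.malloc(ctx, 4096);   (mmap-backed block)
- *   THMapAllocator.free(ctx, p);                  (munmaps, then frees ctx)
- */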
- -#endif - -THAllocator THMapAllocator = { - &THMapAllocator_alloc, - &THMapAllocator_realloc, - &THMapAllocator_free -}; - -THAllocator THRefcountedMapAllocator = { - &THRefcountedMapAllocator_alloc, - &THRefcountedMapAllocator_realloc, - &THRefcountedMapAllocator_free -}; diff --git a/contrib/lua-torch/torch7/lib/TH/THAllocator.h b/contrib/lua-torch/torch7/lib/TH/THAllocator.h deleted file mode 100644 index 18fc9ec0a2..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THAllocator.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef TH_ALLOCATOR_INC -#define TH_ALLOCATOR_INC - -#include "THGeneral.h" - -#define TH_ALLOCATOR_MAPPED_SHARED 1 -#define TH_ALLOCATOR_MAPPED_SHAREDMEM 2 -#define TH_ALLOCATOR_MAPPED_EXCLUSIVE 4 -#define TH_ALLOCATOR_MAPPED_NOCREATE 8 -#define TH_ALLOCATOR_MAPPED_KEEPFD 16 -#define TH_ALLOCATOR_MAPPED_FROMFD 32 -#define TH_ALLOCATOR_MAPPED_UNLINK 64 - -/* Custom allocator - */ -typedef struct THAllocator { - void* (*malloc)(void*, ptrdiff_t); - void* (*realloc)(void*, void*, ptrdiff_t); - void (*free)(void*, void*); -} THAllocator; - -/* default malloc/free allocator. malloc and realloc raise an error (using - * THError) on allocation failure. - */ -extern THAllocator THDefaultAllocator; - -/* file map allocator - */ -typedef struct THMapAllocatorContext_ THMapAllocatorContext; -TH_API THMapAllocatorContext *THMapAllocatorContext_new(const char *filename, int flags); -TH_API THMapAllocatorContext *THMapAllocatorContext_newWithFd(const char *filename, - int fd, int flags); -TH_API char * THMapAllocatorContext_filename(THMapAllocatorContext *ctx); -TH_API int THMapAllocatorContext_fd(THMapAllocatorContext *ctx); -TH_API ptrdiff_t THMapAllocatorContext_size(THMapAllocatorContext *ctx); -TH_API void THMapAllocatorContext_free(THMapAllocatorContext *ctx); -TH_API void THRefcountedMapAllocator_incref(THMapAllocatorContext *ctx, void *data); -TH_API int THRefcountedMapAllocator_decref(THMapAllocatorContext *ctx, void *data); - -extern THAllocator THMapAllocator; -extern THAllocator THRefcountedMapAllocator; - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THAtomic.c b/contrib/lua-torch/torch7/lib/TH/THAtomic.c deleted file mode 100644 index 714fc52dbc..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THAtomic.c +++ /dev/null @@ -1,267 +0,0 @@ -#include "THAtomic.h" - -/* - Note: I thank Leon Bottou for his useful comments. - Ronan. 
-*/
-
-#if defined(USE_C11_ATOMICS)
-#include <stdatomic.h>
-#endif
-
-#if defined(USE_MSC_ATOMICS)
-#include <intrin.h>
-#include <assert.h>
-#endif
-
-#if !defined(USE_MSC_ATOMICS) && !defined(USE_GCC_ATOMICS) && defined(USE_PTHREAD_ATOMICS)
-#include <pthread.h>
-static pthread_mutex_t ptm = PTHREAD_MUTEX_INITIALIZER;
-#endif
-
-void THAtomicSet(int volatile *a, int newvalue)
-{
-#if defined(USE_C11_ATOMICS)
-  atomic_store(a, newvalue);
-#elif defined(USE_MSC_ATOMICS)
-  assert(sizeof(int) == sizeof(long));
-  _InterlockedExchange((long*)a, newvalue);
-#elif defined(USE_GCC_ATOMICS)
-  __sync_lock_test_and_set(a, newvalue);
-#else
-  int oldvalue;
-  do {
-    oldvalue = *a;
-  } while (!THAtomicCompareAndSwap(a, oldvalue, newvalue));
-#endif
-}
-
-int THAtomicGet(int volatile *a)
-{
-#if defined(USE_C11_ATOMICS)
-  return atomic_load(a);
-#else
-  int value;
-  do {
-    value = *a;
-  } while (!THAtomicCompareAndSwap(a, value, value));
-  return value;
-#endif
-}
-
-int THAtomicAdd(int volatile *a, int value)
-{
-#if defined(USE_C11_ATOMICS)
-  return atomic_fetch_add(a, value);
-#elif defined(USE_MSC_ATOMICS)
-  assert(sizeof(int) == sizeof(long));
-  return _InterlockedExchangeAdd((long*)a, value);
-#elif defined(USE_GCC_ATOMICS)
-  return __sync_fetch_and_add(a, value);
-#else
-  int oldvalue;
-  do {
-    oldvalue = *a;
-  } while (!THAtomicCompareAndSwap(a, oldvalue, (oldvalue + value)));
-  return oldvalue;
-#endif
-}
-
-void THAtomicIncrementRef(int volatile *a)
-{
-  THAtomicAdd(a, 1);
-}
-
-int THAtomicDecrementRef(int volatile *a)
-{
-  return (THAtomicAdd(a, -1) == 1);
-}
-
-int THAtomicCompareAndSwap(int volatile *a, int oldvalue, int newvalue)
-{
-#if defined(USE_C11_ATOMICS)
-  return atomic_compare_exchange_strong(a, &oldvalue, newvalue);
-#elif defined(USE_MSC_ATOMICS)
-  assert(sizeof(int) == sizeof(long));
-  return (_InterlockedCompareExchange((long*)a, (long)newvalue, (long)oldvalue) == (long)oldvalue);
-#elif defined(USE_GCC_ATOMICS)
-  return __sync_bool_compare_and_swap(a, oldvalue, newvalue);
-#elif defined(USE_PTHREAD_ATOMICS)
-  int ret = 0;
-  pthread_mutex_lock(&ptm);
-  if(*a == oldvalue) {
-    *a = newvalue;
-    ret = 1;
-  }
-  pthread_mutex_unlock(&ptm);
-  return ret;
-#else
-#warning THAtomic is not thread safe
-  if(*a == oldvalue) {
-    *a = newvalue;
-    return 1;
-  }
-  else
-    return 0;
-#endif
-}
-
-void THAtomicSetLong(long volatile *a, long newvalue)
-{
-#if defined(USE_C11_ATOMICS)
-  atomic_store(a, newvalue);
-#elif defined(USE_MSC_ATOMICS)
-  _InterlockedExchange(a, newvalue);
-#elif defined(USE_GCC_ATOMICS)
-  __sync_lock_test_and_set(a, newvalue);
-#else
-  long oldvalue;
-  do {
-    oldvalue = *a;
-  } while (!THAtomicCompareAndSwapLong(a, oldvalue, newvalue));
-#endif
-}
-
-long THAtomicGetLong(long volatile *a)
-{
-#if defined(USE_C11_ATOMICS)
-  return atomic_load(a);
-#else
-  long value;
-  do {
-    value = *a;
-  } while (!THAtomicCompareAndSwapLong(a, value, value));
-  return value;
-#endif
-}
-
-long THAtomicAddLong(long volatile *a, long value)
-{
-#if defined(USE_C11_ATOMICS)
-  return atomic_fetch_add(a, value);
-#elif defined(USE_MSC_ATOMICS)
-  return _InterlockedExchangeAdd(a, value);
-#elif defined(USE_GCC_ATOMICS)
-  return __sync_fetch_and_add(a, value);
-#else
-  long oldvalue;
-  do {
-    oldvalue = *a;
-  } while (!THAtomicCompareAndSwapLong(a, oldvalue, (oldvalue + value)));
-  return oldvalue;
-#endif
-}
-
-long THAtomicCompareAndSwapLong(long volatile *a, long oldvalue, long newvalue)
-{
-#if defined(USE_C11_ATOMICS)
-  return atomic_compare_exchange_strong(a, &oldvalue, newvalue);
-#elif defined(USE_MSC_ATOMICS)
-  return (_InterlockedCompareExchange(a, newvalue, oldvalue) == oldvalue);
-#elif defined(USE_GCC_ATOMICS)
-  return __sync_bool_compare_and_swap(a, oldvalue, newvalue);
-#elif defined(USE_PTHREAD_ATOMICS)
-  long ret = 0;
-  pthread_mutex_lock(&ptm);
-  if(*a == oldvalue) {
-    *a = newvalue;
-    ret = 1;
-  }
-  pthread_mutex_unlock(&ptm);
-  return ret;
-#else
-#warning THAtomic is not thread safe
-  if(*a == oldvalue) {
-    *a = newvalue;
-    return 1;
-  }
-  else
-    return 0;
-#endif
-}
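The refcounting pair above is the contract TH's storages rely on: THAtomicDecrementRef returns 1 only for the caller that takes the count from 1 to 0, so exactly one owner ever runs the destructor. A minimal sketch of that idiom, using a hypothetical Buf type rather than anything from this tree:

    typedef struct { int volatile refcount; char *data; } Buf;  /* hypothetical */

    static void Buf_retain(Buf *b) {
      THAtomicIncrementRef(&b->refcount);
    }

    static void Buf_release(Buf *b) {
      /* THAtomicDecrementRef returns 1 iff this call dropped the count to zero */
      if (THAtomicDecrementRef(&b->refcount)) {
        free(b->data);
        free(b);
      }
    }
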
-
-void THAtomicSetPtrdiff(ptrdiff_t volatile *a, ptrdiff_t newvalue)
-{
-#if defined(USE_C11_ATOMICS)
-  atomic_store(a, newvalue);
-#elif defined(USE_MSC_ATOMICS)
-#ifdef _WIN64
-  _InterlockedExchange64(a, newvalue);
-#else
-  _InterlockedExchange(a, newvalue);
-#endif
-#elif defined(USE_GCC_ATOMICS)
-  __sync_lock_test_and_set(a, newvalue);
-#else
-  ptrdiff_t oldvalue;
-  do {
-    oldvalue = *a;
-  } while (!THAtomicCompareAndSwapPtrdiff(a, oldvalue, newvalue));
-#endif
-}
-
-ptrdiff_t THAtomicGetPtrdiff(ptrdiff_t volatile *a)
-{
-#if defined(USE_C11_ATOMICS)
-  return atomic_load(a);
-#else
-  ptrdiff_t value;
-  do {
-    value = *a;
-  } while (!THAtomicCompareAndSwapPtrdiff(a, value, value));
-  return value;
-#endif
-}
-
-ptrdiff_t THAtomicAddPtrdiff(ptrdiff_t volatile *a, ptrdiff_t value)
-{
-#if defined(USE_C11_ATOMICS)
-  return atomic_fetch_add(a, value);
-#elif defined(USE_MSC_ATOMICS)
-#ifdef _WIN64
-  return _InterlockedExchangeAdd64(a, value);
-#else
-  return _InterlockedExchangeAdd(a, value);
-#endif
-#elif defined(USE_GCC_ATOMICS)
-  return __sync_fetch_and_add(a, value);
-#else
-  ptrdiff_t oldvalue;
-  do {
-    oldvalue = *a;
-  } while (!THAtomicCompareAndSwapPtrdiff(a, oldvalue, (oldvalue + value)));
-  return oldvalue;
-#endif
-}
-
-ptrdiff_t THAtomicCompareAndSwapPtrdiff(ptrdiff_t volatile *a, ptrdiff_t oldvalue, ptrdiff_t newvalue)
-{
-#if defined(USE_C11_ATOMICS)
-  return atomic_compare_exchange_strong(a, &oldvalue, newvalue);
-#elif defined(USE_MSC_ATOMICS)
-#ifdef _WIN64
-  return (_InterlockedCompareExchange64(a, newvalue, oldvalue) == oldvalue);
-#else
-  return (_InterlockedCompareExchange(a, newvalue, oldvalue) == oldvalue);
-#endif
-#elif defined(USE_GCC_ATOMICS)
-  return __sync_bool_compare_and_swap(a, oldvalue, newvalue);
-#elif defined(USE_PTHREAD_ATOMICS)
-  ptrdiff_t ret = 0;
-  pthread_mutex_lock(&ptm);
-  if(*a == oldvalue) {
-    *a = newvalue;
-    ret = 1;
-  }
-  pthread_mutex_unlock(&ptm);
-  return ret;
-#else
-#warning THAtomic is not thread safe
-  if(*a == oldvalue) {
-    *a = newvalue;
-    return 1;
-  }
-  else
-    return 0;
-#endif
-}
diff --git a/contrib/lua-torch/torch7/lib/TH/THAtomic.h b/contrib/lua-torch/torch7/lib/TH/THAtomic.h
deleted file mode 100644
index d77b20b240..0000000000
--- a/contrib/lua-torch/torch7/lib/TH/THAtomic.h
+++ /dev/null
@@ -1,125 +0,0 @@
-#ifndef TH_ATOMIC_INC
-#define TH_ATOMIC_INC
-
-#include "THGeneral.h"
-
-/******************************************************************************
- * Atomic operations for TH
- * Five backends are integrated:
- * - C11 atomic operations
- * - MSVC intrinsics
- * - GCC intrinsics
- * - Pthread if none of the above is available
- * - Unsafe mode if none of the above is available
- ******************************************************************************/
-
-
-/******************************************************************************
- * all-purpose functions
- ******************************************************************************/
-
-/*
- * *a = newvalue
-*/
-TH_API void THAtomicSet(int volatile *a, int newvalue);
-
-/*
- * return *a
-*/
-TH_API int THAtomicGet(int volatile *a); - -/* - * *a += value, - * return previous *a -*/ -TH_API int THAtomicAdd(int volatile *a, int value); - -/* - * check if (*a == oldvalue) - * if true: set *a to newvalue, return 1 - * if false: return 0 -*/ -TH_API int THAtomicCompareAndSwap(int volatile *a, int oldvalue, int newvalue); - - -/****************************************************************************** - * refcounting functions - ******************************************************************************/ - -/* - * *a++ -*/ -TH_API void THAtomicIncrementRef(int volatile *a); - -/* - * *a--, - * return 1 if *a == 0 after the operation, 0 otherwise -*/ -TH_API int THAtomicDecrementRef(int volatile *a); - - - -/****************************************************************************** - * functions for long type - ******************************************************************************/ - -/* - * *a = newvalue -*/ -TH_API void THAtomicSetLong(long volatile *a, long newvalue); - -/* - * return *a -*/ -TH_API long THAtomicGetLong(long volatile *a); - -/* - * *a += value, - * return previous *a -*/ -TH_API long THAtomicAddLong(long volatile *a, long value); - -/* - * check if (*a == oldvalue) - * if true: set *a to newvalue, return 1 - * if false: return 0 -*/ -TH_API long THAtomicCompareAndSwapLong(long volatile *a, long oldvalue, long newvalue); - - - -/****************************************************************************** - * functions for ptrdiff_t type - ******************************************************************************/ - -/* - * *a = newvalue -*/ -TH_API void THAtomicSetPtrdiff(ptrdiff_t volatile *a, ptrdiff_t newvalue); - -/* - * return *a -*/ -TH_API ptrdiff_t THAtomicGetPtrdiff(ptrdiff_t volatile *a); - -/* - * *a += value, - * return previous *a -*/ -TH_API ptrdiff_t THAtomicAddPtrdiff(ptrdiff_t volatile *a, ptrdiff_t value); - -/* - * check if (*a == oldvalue) - * if true: set *a to newvalue, return 1 - * if false: return 0 -*/ -TH_API ptrdiff_t THAtomicCompareAndSwapPtrdiff(ptrdiff_t volatile *a, ptrdiff_t oldvalue, ptrdiff_t newvalue); - -#if defined(USE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) && \ - ATOMIC_INT_LOCK_FREE == 2 -#define TH_ATOMIC_IPC_REFCOUNT 1 -#elif defined(USE_MSC_ATOMICS) || defined(USE_GCC_ATOMICS) -#define TH_ATOMIC_IPC_REFCOUNT 1 -#endif - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THBlas.c b/contrib/lua-torch/torch7/lib/TH/THBlas.c deleted file mode 100644 index 35618b26a1..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THBlas.c +++ /dev/null @@ -1,4 +0,0 @@ -#include "THBlas.h" - -#include "generic/THBlas.c" -#include "THGenerateAllTypes.h" diff --git a/contrib/lua-torch/torch7/lib/TH/THBlas.h b/contrib/lua-torch/torch7/lib/TH/THBlas.h deleted file mode 100644 index 5fef0febcd..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THBlas.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef TH_BLAS_INC -#define TH_BLAS_INC - -#include "THGeneral.h" - -#define THBlas_(NAME) TH_CONCAT_4(TH,Real,Blas_,NAME) - -#include "generic/THBlas.h" -#include "THGenerateAllTypes.h" - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THConfig.cmake.in b/contrib/lua-torch/torch7/lib/TH/THConfig.cmake.in deleted file mode 100644 index 306cd878bc..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THConfig.cmake.in +++ /dev/null @@ -1,9 +0,0 @@ -# Find the TH includes and library -# -# TH_INCLUDE_DIR -- where to find the includes -# TH_LIBRARIES -- list of libraries to link against -# TH_FOUND -- set to 1 if found - -SET(TH_FOUND 1) 
-SET(TH_INCLUDE_DIR "@TH_INCLUDE_DIR@")
-SET(TH_LIBRARIES "@TH_LIBRARIES@")
diff --git a/contrib/lua-torch/torch7/lib/TH/THDiskFile.c b/contrib/lua-torch/torch7/lib/TH/THDiskFile.c
deleted file mode 100644
index 3f57b3b35c..0000000000
--- a/contrib/lua-torch/torch7/lib/TH/THDiskFile.c
+++ /dev/null
@@ -1,797 +0,0 @@
-#include "THGeneral.h"
-#include "THDiskFile.h"
-#include "THFilePrivate.h"
-
-#include <stdint.h>
-#ifndef LLONG_MAX
-#define LLONG_MAX 9223372036854775807LL
-#endif
-
-typedef struct THDiskFile__
-{
-  THFile file;
-
-  FILE *handle;
-  char *name;
-  int isNativeEncoding;
-  int longSize;
-
-} THDiskFile;
-
-static int THDiskFile_isOpened(THFile *self)
-{
-  THDiskFile *dfself = (THDiskFile*)self;
-  return (dfself->handle != NULL);
-}
-
-const char *THDiskFile_name(THFile *self)
-{
-  THDiskFile *dfself = (THDiskFile*)self;
-  return dfself->name;
-}
-
-/* workaround mac osx lion ***insane*** fread bug */
-#ifdef __APPLE__
-size_t fread__(void *ptr, size_t size, size_t nitems, FILE *stream)
-{
-  size_t nread = 0;
-  while(!feof(stream) && !ferror(stream) && (nread < nitems))
-    nread += fread((char*)ptr+nread*size, size, THMin(2147483648/size, nitems-nread), stream);
-  return nread;
-}
-#else
-#define fread__ fread
-#endif
-
-#define READ_WRITE_METHODS(TYPE, TYPEC, ASCII_READ_ELEM, ASCII_WRITE_ELEM) \
-  static size_t THDiskFile_read##TYPEC(THFile *self, TYPE *data, size_t n) \
-  { \
-    THDiskFile *dfself = (THDiskFile*)(self); \
-    size_t nread = 0L; \
-    \
-    THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); \
-    THArgCheck(dfself->file.isReadable, 1, "attempt to read in a write-only file"); \
-    \
-    if(dfself->file.isBinary) \
-    { \
-      nread = fread__(data, sizeof(TYPE), n, dfself->handle); \
-      if(!dfself->isNativeEncoding && (sizeof(TYPE) > 1) && (nread > 0)) \
-        THDiskFile_reverseMemory(data, data, sizeof(TYPE), nread); \
-    } \
-    else \
-    { \
-      size_t i; \
-      for(i = 0; i < n; i++) \
-      { \
-        ASCII_READ_ELEM; /* increment here result and break if wrong */ \
-      } \
-      if(dfself->file.isAutoSpacing && (n > 0)) \
-      { \
-        int c = fgetc(dfself->handle); \
-        if( (c != '\n') && (c != EOF) ) \
-          ungetc(c, dfself->handle); \
-      } \
-    } \
-    \
-    if(nread != n) \
-    { \
-      dfself->file.hasError = 1; /* shouldn't we put hasError to 0 all the time ?
*/ \ - if(!dfself->file.isQuiet) \ - THError("read error: read %d blocks instead of %d", nread, n); \ - } \ - \ - return nread; \ - } \ - \ - static size_t THDiskFile_write##TYPEC(THFile *self, TYPE *data, size_t n) \ - { \ - THDiskFile *dfself = (THDiskFile*)(self); \ - size_t nwrite = 0L; \ - \ - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); \ - THArgCheck(dfself->file.isWritable, 1, "attempt to write in a read-only file"); \ - \ - if(dfself->file.isBinary) \ - { \ - if(dfself->isNativeEncoding) \ - { \ - nwrite = fwrite(data, sizeof(TYPE), n, dfself->handle); \ - } \ - else \ - { \ - if(sizeof(TYPE) > 1) \ - { \ - char *buffer = THAlloc(sizeof(TYPE)*n); \ - THDiskFile_reverseMemory(buffer, data, sizeof(TYPE), n); \ - nwrite = fwrite(buffer, sizeof(TYPE), n, dfself->handle); \ - THFree(buffer); \ - } \ - else \ - nwrite = fwrite(data, sizeof(TYPE), n, dfself->handle); \ - } \ - } \ - else \ - { \ - size_t i; \ - for(i = 0; i < n; i++) \ - { \ - ASCII_WRITE_ELEM; \ - if( dfself->file.isAutoSpacing && (i < n-1) ) \ - fprintf(dfself->handle, " "); \ - } \ - if(dfself->file.isAutoSpacing && (n > 0)) \ - fprintf(dfself->handle, "\n"); \ - } \ - \ - if(nwrite != n) \ - { \ - dfself->file.hasError = 1; \ - if(!dfself->file.isQuiet) \ - THError("write error: wrote %d blocks instead of %d", nwrite, n); \ - } \ - \ - return nwrite; \ -} - -static int THDiskFile_mode(const char *mode, int *isReadable, int *isWritable) -{ - *isReadable = 0; - *isWritable = 0; - if(strlen(mode) == 1) - { - if(*mode == 'r') - { - *isReadable = 1; - return 1; - } - else if(*mode == 'w') - { - *isWritable = 1; - return 1; - } - } - else if(strlen(mode) == 2) - { - if(mode[0] == 'r' && mode[1] == 'w') - { - *isReadable = 1; - *isWritable = 1; - return 1; - } - } - return 0; -} - -static void THDiskFile_synchronize(THFile *self) -{ - THDiskFile *dfself = (THDiskFile*)(self); - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - fflush(dfself->handle); -} - -static void THDiskFile_seek(THFile *self, size_t position) -{ - THDiskFile *dfself = (THDiskFile*)(self); - - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - -#if defined(_WIN64) - THArgCheck(position <= (size_t)INT64_MAX, 2, "position must be smaller than INT64_MAX"); - if(_fseeki64(dfself->handle, (__int64)position, SEEK_SET) < 0) -#elif defined(_WIN32) - THArgCheck(position <= (size_t)LONG_MAX, 2, "position must be smaller than LONG_MAX"); - if(fseek(dfself->handle, (long)position, SEEK_SET) < 0) -#else - THArgCheck(position <= (size_t)LLONG_MAX, 2, "position must be smaller than LLONG_MAX"); - if(fseeko(dfself->handle, (off_t)position, SEEK_SET) < 0) -#endif - { - dfself->file.hasError = 1; - if(!dfself->file.isQuiet) - THError("unable to seek to position %zu", position); - } -} - -static void THDiskFile_seekEnd(THFile *self) -{ - THDiskFile *dfself = (THDiskFile*)(self); - - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - -#if defined(_WIN64) - if(_fseeki64(dfself->handle, 0, SEEK_END) < 0) -#elif defined(_WIN32) - if(fseek(dfself->handle, 0, SEEK_END) < 0) -#else - if(fseeko(dfself->handle, 0, SEEK_END) < 0) -#endif - { - dfself->file.hasError = 1; - if(!dfself->file.isQuiet) - THError("unable to seek at end of file"); - } -} - -static size_t THDiskFile_position(THFile *self) -{ - THDiskFile *dfself = (THDiskFile*)(self); - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - -#if defined(_WIN64) - __int64 offset = _ftelli64(dfself->handle); -#elif 
defined(_WIN32) - long offset = ftell(dfself->handle); -#else - off_t offset = ftello(dfself->handle); -#endif - if (offset > -1) - return (size_t)offset; - else if(!dfself->file.isQuiet) - THError("unable to obtain disk file offset (maybe a long overflow occurred)"); - - return 0; -} - -static void THDiskFile_close(THFile *self) -{ - THDiskFile *dfself = (THDiskFile*)(self); - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - fclose(dfself->handle); - dfself->handle = NULL; -} - -/* Little and Big Endian */ - -static void THDiskFile_reverseMemory(void *dst, const void *src, size_t blockSize, size_t numBlocks) -{ - if(blockSize > 1) - { - size_t halfBlockSize = blockSize/2; - char *charSrc = (char*)src; - char *charDst = (char*)dst; - size_t b, i; - for(b = 0; b < numBlocks; b++) - { - for(i = 0; i < halfBlockSize; i++) - { - char z = charSrc[i]; - charDst[i] = charSrc[blockSize-1-i]; - charDst[blockSize-1-i] = z; - } - charSrc += blockSize; - charDst += blockSize; - } - } -} - -int THDiskFile_isLittleEndianCPU(void) -{ - int x = 7; - char *ptr = (char *)&x; - - if(ptr[0] == 0) - return 0; - else - return 1; -} - -int THDiskFile_isBigEndianCPU(void) -{ - return(!THDiskFile_isLittleEndianCPU()); -} - -void THDiskFile_nativeEndianEncoding(THFile *self) -{ - THDiskFile *dfself = (THDiskFile*)(self); - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - dfself->isNativeEncoding = 1; -} - -void THDiskFile_littleEndianEncoding(THFile *self) -{ - THDiskFile *dfself = (THDiskFile*)(self); - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - dfself->isNativeEncoding = THDiskFile_isLittleEndianCPU(); -} - -void THDiskFile_bigEndianEncoding(THFile *self) -{ - THDiskFile *dfself = (THDiskFile*)(self); - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - dfself->isNativeEncoding = !THDiskFile_isLittleEndianCPU(); -} - -/* End of Little and Big Endian Stuff */ - -void THDiskFile_longSize(THFile *self, int size) -{ - THDiskFile *dfself = (THDiskFile*)(self); - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - THArgCheck(size == 0 || size == 4 || size == 8, 1, "Invalid long size specified"); - dfself->longSize = size; -} - -void THDiskFile_noBuffer(THFile *self) -{ - THDiskFile *dfself = (THDiskFile*)(self); - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - if (setvbuf(dfself->handle, NULL, _IONBF, 0)) { - THError("error: cannot disable buffer"); - } -} - -static void THDiskFile_free(THFile *self) -{ - THDiskFile *dfself = (THDiskFile*)(self); - if(dfself->handle) - fclose(dfself->handle); - THFree(dfself->name); - THFree(dfself); -} - -/* READ_WRITE_METHODS(int, Bool, */ -/* int value = 0; int ret = fscanf(file->handle, "%d", &value); array[i] = (value ? 1 : 0); if(ret <= 0) break; else result++, */ -/* int value = (array[i] ? 
1 : 0); nElemWritten = fprintf(file->handle, "%d", value), */ -/* true) */ - -/* Note that we do a trick */ -READ_WRITE_METHODS(unsigned char, Byte, - nread = fread(data, 1, n, dfself->handle); break, - nwrite = fwrite(data, 1, n, dfself->handle); break) - -READ_WRITE_METHODS(char, Char, - nread = fread(data, 1, n, dfself->handle); break, - nwrite = fwrite(data, 1, n, dfself->handle); break) - -READ_WRITE_METHODS(short, Short, - int ret = fscanf(dfself->handle, "%hd", &data[i]); if(ret <= 0) break; else nread++, - int ret = fprintf(dfself->handle, "%hd", data[i]); if(ret <= 0) break; else nwrite++) - -READ_WRITE_METHODS(int, Int, - int ret = fscanf(dfself->handle, "%d", &data[i]); if(ret <= 0) break; else nread++, - int ret = fprintf(dfself->handle, "%d", data[i]); if(ret <= 0) break; else nwrite++) - -READ_WRITE_METHODS(float, Float, - int ret = fscanf(dfself->handle, "%g", &data[i]); if(ret <= 0) break; else nread++, - int ret = fprintf(dfself->handle, "%.9g", data[i]); if(ret <= 0) break; else nwrite++) - -READ_WRITE_METHODS(THHalf, Half, - float buf; int ret = fscanf(dfself->handle, "%g", &buf); if(ret <= 0) break; else { data[i]= TH_float2half(buf); nread++; }, - int ret = fprintf(dfself->handle, "%.9g", TH_half2float(data[i])); if(ret <= 0) break; else nwrite++) - -READ_WRITE_METHODS(double, Double, - int ret = fscanf(dfself->handle, "%lg", &data[i]); if(ret <= 0) break; else nread++, - int ret = fprintf(dfself->handle, "%.17g", data[i]); if(ret <= 0) break; else nwrite++) - - -/* For Long we need to rewrite everything, because of the special management of longSize */ -static size_t THDiskFile_readLong(THFile *self, long *data, size_t n) -{ - THDiskFile *dfself = (THDiskFile*)(self); - size_t nread = 0L; - - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - THArgCheck(dfself->file.isReadable, 1, "attempt to read in a write-only file"); - - if(dfself->file.isBinary) - { - if(dfself->longSize == 0 || dfself->longSize == sizeof(long)) - { - nread = fread__(data, sizeof(long), n, dfself->handle); - if(!dfself->isNativeEncoding && (sizeof(long) > 1) && (nread > 0)) - THDiskFile_reverseMemory(data, data, sizeof(long), nread); - } else if(dfself->longSize == 4) - { - nread = fread__(data, 4, n, dfself->handle); - if(!dfself->isNativeEncoding && (nread > 0)) - THDiskFile_reverseMemory(data, data, 4, nread); - size_t i; - for(i = nread; i > 0; i--) - data[i-1] = ((int *)data)[i-1]; - } - else /* if(dfself->longSize == 8) */ - { - int big_endian = !THDiskFile_isLittleEndianCPU(); - int32_t *buffer = THAlloc(8*n); - nread = fread__(buffer, 8, n, dfself->handle); - size_t i; - for(i = nread; i > 0; i--) - data[i-1] = buffer[2*(i-1) + big_endian]; - THFree(buffer); - if(!dfself->isNativeEncoding && (nread > 0)) - THDiskFile_reverseMemory(data, data, 4, nread); - } - } - else - { - size_t i; - for(i = 0; i < n; i++) - { - int ret = fscanf(dfself->handle, "%ld", &data[i]); if(ret <= 0) break; else nread++; - } - if(dfself->file.isAutoSpacing && (n > 0)) - { - int c = fgetc(dfself->handle); - if( (c != '\n') && (c != EOF) ) - ungetc(c, dfself->handle); - } - } - - if(nread != n) - { - dfself->file.hasError = 1; /* shouldn't we put hasError to 0 all the time ? 
*/ - if(!dfself->file.isQuiet) - THError("read error: read %d blocks instead of %d", nread, n); - } - - return nread; -} - -static size_t THDiskFile_writeLong(THFile *self, long *data, size_t n) -{ - THDiskFile *dfself = (THDiskFile*)(self); - size_t nwrite = 0L; - - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - THArgCheck(dfself->file.isWritable, 1, "attempt to write in a read-only file"); - - if(dfself->file.isBinary) - { - if(dfself->longSize == 0 || dfself->longSize == sizeof(long)) - { - if(dfself->isNativeEncoding) - { - nwrite = fwrite(data, sizeof(long), n, dfself->handle); - } - else - { - char *buffer = THAlloc(sizeof(long)*n); - THDiskFile_reverseMemory(buffer, data, sizeof(long), n); - nwrite = fwrite(buffer, sizeof(long), n, dfself->handle); - THFree(buffer); - } - } else if(dfself->longSize == 4) - { - int32_t *buffer = THAlloc(4*n); - size_t i; - for(i = 0; i < n; i++) - buffer[i] = data[i]; - if(!dfself->isNativeEncoding) - THDiskFile_reverseMemory(buffer, buffer, 4, n); - nwrite = fwrite(buffer, 4, n, dfself->handle); - THFree(buffer); - } - else /* if(dfself->longSize == 8) */ - { - int big_endian = !THDiskFile_isLittleEndianCPU(); - int32_t *buffer = THAlloc(8*n); - size_t i; - for(i = 0; i < n; i++) - { - buffer[2*i + !big_endian] = 0; - buffer[2*i + big_endian] = data[i]; - } - if(!dfself->isNativeEncoding) - THDiskFile_reverseMemory(buffer, buffer, 8, n); - nwrite = fwrite(buffer, 8, n, dfself->handle); - THFree(buffer); - } - } - else - { - size_t i; - for(i = 0; i < n; i++) - { - int ret = fprintf(dfself->handle, "%ld", data[i]); if(ret <= 0) break; else nwrite++; - if( dfself->file.isAutoSpacing && (i < n-1) ) - fprintf(dfself->handle, " "); - } - if(dfself->file.isAutoSpacing && (n > 0)) - fprintf(dfself->handle, "\n"); - } - - if(nwrite != n) - { - dfself->file.hasError = 1; - if(!dfself->file.isQuiet) - THError("write error: wrote %d blocks instead of %d", nwrite, n); - } - - return nwrite; -} - -static size_t THDiskFile_readString(THFile *self, const char *format, char **str_) -{ - THDiskFile *dfself = (THDiskFile*)(self); - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - THArgCheck(dfself->file.isReadable, 1, "attempt to read in a write-only file"); - THArgCheck((strlen(format) >= 2 ? (format[0] == '*') && (format[1] == 'a' || format[1] == 'l') : 0), 2, "format must be '*a' or '*l'"); - -/* note: the string won't survive long, as it is copied into lua */ -/* so 1024 is not that big... */ -#define TBRS_BSZ 1024L - - if(format[1] == 'a') - { - char *p = THAlloc(TBRS_BSZ); - size_t total = TBRS_BSZ; - size_t pos = 0; - - for (;;) - { - if(total-pos == 0) /* we need more space! */ - { - total += TBRS_BSZ; - p = THRealloc(p, total); - } - pos += fread(p+pos, 1, total-pos, dfself->handle); - if (pos < total) /* eof? */ - { - if(pos == 0) - { - THFree(p); - dfself->file.hasError = 1; - if(!dfself->file.isQuiet) - THError("read error: read 0 blocks instead of 1"); - - *str_ = NULL; - return 0; - } - *str_ = p; - return pos; - } - } - } - else - { - char *p = THAlloc(TBRS_BSZ); - size_t total = TBRS_BSZ; - size_t pos = 0; - size_t size; - - for (;;) - { - if(total-pos <= 1) /* we can only write '\0' in there! */ - { - total += TBRS_BSZ; - p = THRealloc(p, total); - } - if (fgets(p+pos, total-pos, dfself->handle) == NULL) /* eof? 
*/ - { - if(pos == 0) - { - THFree(p); - dfself->file.hasError = 1; - if(!dfself->file.isQuiet) - THError("read error: read 0 blocks instead of 1"); - - *str_ = NULL; - return 0; - } - *str_ = p; - return pos; - } - size = strlen(p+pos); - if (size == 0 || (p+pos)[size-1] != '\n') - { - pos += size; - } - else - { - pos += size-1; /* do not include `eol' */ - *str_ = p; - return pos; - } - } - } - - *str_ = NULL; - return 0; -} - - -static size_t THDiskFile_writeString(THFile *self, const char *str, size_t size) -{ - THDiskFile *dfself = (THDiskFile*)(self); - size_t nwrite; - - THArgCheck(dfself->handle != NULL, 1, "attempt to use a closed file"); - THArgCheck(dfself->file.isWritable, 1, "attempt to write in a read-only file"); - - nwrite = fwrite(str, 1, size, dfself->handle); - if(nwrite != size) - { - dfself->file.hasError = 1; - if(!dfself->file.isQuiet) - THError("write error: wrote %zu blocks instead of %zu", nwrite, size); - } - - return nwrite; -} - -THFile *THDiskFile_new(const char *name, const char *mode, int isQuiet) -{ - static struct THFileVTable vtable = { - THDiskFile_isOpened, - - THDiskFile_readByte, - THDiskFile_readChar, - THDiskFile_readShort, - THDiskFile_readInt, - THDiskFile_readLong, - THDiskFile_readFloat, - THDiskFile_readDouble, - THDiskFile_readHalf, - THDiskFile_readString, - - THDiskFile_writeByte, - THDiskFile_writeChar, - THDiskFile_writeShort, - THDiskFile_writeInt, - THDiskFile_writeLong, - THDiskFile_writeFloat, - THDiskFile_writeDouble, - THDiskFile_writeHalf, - THDiskFile_writeString, - - THDiskFile_synchronize, - THDiskFile_seek, - THDiskFile_seekEnd, - THDiskFile_position, - THDiskFile_close, - THDiskFile_free - }; - - int isReadable; - int isWritable; - FILE *handle; - THDiskFile *self; - - THArgCheck(THDiskFile_mode(mode, &isReadable, &isWritable), 2, "file mode should be 'r','w' or 'rw'"); - - if( isReadable && isWritable ) - { - handle = fopen(name, "r+b"); - if(!handle) - { - handle = fopen(name, "wb"); - if(handle) - { - fclose(handle); - handle = fopen(name, "r+b"); - } - } - } - else - handle = fopen(name, (isReadable ? "rb" : "wb")); - - if(!handle) - { - if(isQuiet) - return 0; - else - THError("cannot open <%s> in mode %c%c", name, (isReadable ? 'r' : ' '), (isWritable ? 
'w' : ' ')); - } - - self = THAlloc(sizeof(THDiskFile)); - - self->handle = handle; - self->name = THAlloc(strlen(name)+1); - strcpy(self->name, name); - self->isNativeEncoding = 1; - self->longSize = 0; - - self->file.vtable = &vtable; - self->file.isQuiet = isQuiet; - self->file.isReadable = isReadable; - self->file.isWritable = isWritable; - self->file.isBinary = 0; - self->file.isAutoSpacing = 1; - self->file.hasError = 0; - - return (THFile*)self; -} - -/* PipeFile */ - -static int THPipeFile_mode(const char *mode, int *isReadable, int *isWritable) -{ - *isReadable = 0; - *isWritable = 0; - if(strlen(mode) == 1) - { - if(*mode == 'r') - { - *isReadable = 1; - return 1; - } - else if(*mode == 'w') - { - *isWritable = 1; - return 1; - } - } - return 0; -} - -static void THPipeFile_free(THFile *self) -{ - THDiskFile *dfself = (THDiskFile*)(self); - if(dfself->handle) - pclose(dfself->handle); - THFree(dfself->name); - THFree(dfself); -} - -THFile *THPipeFile_new(const char *name, const char *mode, int isQuiet) -{ - static struct THFileVTable vtable = { - THDiskFile_isOpened, - - THDiskFile_readByte, - THDiskFile_readChar, - THDiskFile_readShort, - THDiskFile_readInt, - THDiskFile_readLong, - THDiskFile_readFloat, - THDiskFile_readDouble, - THDiskFile_readHalf, - THDiskFile_readString, - - THDiskFile_writeByte, - THDiskFile_writeChar, - THDiskFile_writeShort, - THDiskFile_writeInt, - THDiskFile_writeLong, - THDiskFile_writeFloat, - THDiskFile_writeDouble, - THDiskFile_writeHalf, - THDiskFile_writeString, - - THDiskFile_synchronize, - THDiskFile_seek, - THDiskFile_seekEnd, - THDiskFile_position, - THDiskFile_close, - THPipeFile_free - }; - - int isReadable; - int isWritable; - FILE *handle; - THDiskFile *self; - - THArgCheck(THPipeFile_mode(mode, &isReadable, &isWritable), 2, "file mode should be 'r','w'"); - -#ifdef _WIN32 - handle = _popen(name, (isReadable ? "rb" : "wb")); -#else - handle = popen(name, (isReadable ? "r" : "w")); -#endif - - if(!handle) - { - if(isQuiet) - return 0; - else - THError("cannot open <%s> in mode %c%c. This might be because eg the executable doesn't exist, but it could also be because you are out of memory.", name, (isReadable ? 'r' : ' '), (isWritable ? 
'w' : ' ')); - } - - self = THAlloc(sizeof(THDiskFile)); - - self->handle = handle; - self->name = THAlloc(strlen(name)+1); - strcpy(self->name, name); - self->isNativeEncoding = 1; - self->longSize = 0; - - self->file.vtable = &vtable; - self->file.isQuiet = isQuiet; - self->file.isReadable = isReadable; - self->file.isWritable = isWritable; - self->file.isBinary = 0; - self->file.isAutoSpacing = 1; - self->file.hasError = 0; - - return (THFile*)self; -} diff --git a/contrib/lua-torch/torch7/lib/TH/THDiskFile.h b/contrib/lua-torch/torch7/lib/TH/THDiskFile.h deleted file mode 100644 index bc5c001c74..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THDiskFile.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef TH_DISK_FILE_INC -#define TH_DISK_FILE_INC - -#include "THFile.h" - -TH_API THFile *THDiskFile_new(const char *name, const char *mode, int isQuiet); -TH_API THFile *THPipeFile_new(const char *name, const char *mode, int isQuiet); - -TH_API const char *THDiskFile_name(THFile *self); - -TH_API int THDiskFile_isLittleEndianCPU(void); -TH_API int THDiskFile_isBigEndianCPU(void); -TH_API void THDiskFile_nativeEndianEncoding(THFile *self); -TH_API void THDiskFile_littleEndianEncoding(THFile *self); -TH_API void THDiskFile_bigEndianEncoding(THFile *self); -TH_API void THDiskFile_longSize(THFile *self, int size); -TH_API void THDiskFile_noBuffer(THFile *self); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THFile.c b/contrib/lua-torch/torch7/lib/TH/THFile.c deleted file mode 100644 index 3717b7b5cb..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THFile.c +++ /dev/null @@ -1,157 +0,0 @@ -#include "THFile.h" -#include "THFilePrivate.h" - -#define IMPLEMENT_THFILE_RW(TYPEC, TYPE) \ - size_t THFile_read##TYPEC##Raw(THFile *self, TYPE *data, size_t n) \ - { \ - return (*self->vtable->read##TYPEC)(self, data, n); \ - } \ - \ - size_t THFile_write##TYPEC##Raw(THFile *self, TYPE *data, size_t n) \ - { \ - return (*self->vtable->write##TYPEC)(self, data, n); \ - } - -IMPLEMENT_THFILE_RW(Byte, unsigned char) -IMPLEMENT_THFILE_RW(Char, char) -IMPLEMENT_THFILE_RW(Short, short) -IMPLEMENT_THFILE_RW(Int, int) -IMPLEMENT_THFILE_RW(Long, long) -IMPLEMENT_THFILE_RW(Float, float) -IMPLEMENT_THFILE_RW(Double, double) -IMPLEMENT_THFILE_RW(Half, THHalf) - -size_t THFile_readStringRaw(THFile *self, const char *format, char **str_) -{ - return self->vtable->readString(self, format, str_); -} - -size_t THFile_writeStringRaw(THFile *self, const char *str, size_t size) -{ - return self->vtable->writeString(self, str, size); -} - -void THFile_synchronize(THFile *self) -{ - self->vtable->synchronize(self); -} - -void THFile_seek(THFile *self, size_t position) -{ - self->vtable->seek(self, position); -} - -void THFile_seekEnd(THFile *self) -{ - self->vtable->seekEnd(self); -} - -size_t THFile_position(THFile *self) -{ - return self->vtable->position(self); -} - -void THFile_close(THFile *self) -{ - self->vtable->close(self); -} - -void THFile_free(THFile *self) -{ - self->vtable->free(self); -} - -int THFile_isOpened(THFile *self) -{ - return self->vtable->isOpened(self); -} - -#define IMPLEMENT_THFILE_FLAGS(FLAG) \ - int THFile_##FLAG(THFile *self) \ - { \ - return self->FLAG; \ - } - -IMPLEMENT_THFILE_FLAGS(isQuiet) -IMPLEMENT_THFILE_FLAGS(isReadable) -IMPLEMENT_THFILE_FLAGS(isWritable) -IMPLEMENT_THFILE_FLAGS(isBinary) -IMPLEMENT_THFILE_FLAGS(isAutoSpacing) -IMPLEMENT_THFILE_FLAGS(hasError) - -void THFile_binary(THFile *self) -{ - self->isBinary = 1; -} - -void THFile_ascii(THFile *self) -{ - self->isBinary = 0; -} - 
-void THFile_autoSpacing(THFile *self) -{ - self->isAutoSpacing = 1; -} - -void THFile_noAutoSpacing(THFile *self) -{ - self->isAutoSpacing = 0; -} - -void THFile_quiet(THFile *self) -{ - self->isQuiet = 1; -} - -void THFile_pedantic(THFile *self) -{ - self->isQuiet = 0; -} - -void THFile_clearError(THFile *self) -{ - self->hasError = 0; -} - -#define IMPLEMENT_THFILE_SCALAR(TYPEC, TYPE) \ - TYPE THFile_read##TYPEC##Scalar(THFile *self) \ - { \ - TYPE scalar; \ - THFile_read##TYPEC##Raw(self, &scalar, 1); \ - return scalar; \ - } \ - \ - void THFile_write##TYPEC##Scalar(THFile *self, TYPE scalar) \ - { \ - THFile_write##TYPEC##Raw(self, &scalar, 1); \ - } - -IMPLEMENT_THFILE_SCALAR(Byte, unsigned char) -IMPLEMENT_THFILE_SCALAR(Char, char) -IMPLEMENT_THFILE_SCALAR(Short, short) -IMPLEMENT_THFILE_SCALAR(Int, int) -IMPLEMENT_THFILE_SCALAR(Long, long) -IMPLEMENT_THFILE_SCALAR(Float, float) -IMPLEMENT_THFILE_SCALAR(Double, double) -IMPLEMENT_THFILE_SCALAR(Half, THHalf) - -#define IMPLEMENT_THFILE_STORAGE(TYPEC, TYPE) \ - size_t THFile_read##TYPEC(THFile *self, TH##TYPEC##Storage *storage) \ - { \ - return THFile_read##TYPEC##Raw(self, storage->data, storage->size); \ - } \ - \ - size_t THFile_write##TYPEC(THFile *self, TH##TYPEC##Storage *storage) \ - { \ - return THFile_write##TYPEC##Raw(self, storage->data, storage->size); \ - } - -IMPLEMENT_THFILE_STORAGE(Byte, unsigned char) -IMPLEMENT_THFILE_STORAGE(Char, char) -IMPLEMENT_THFILE_STORAGE(Short, short) -IMPLEMENT_THFILE_STORAGE(Int, int) -IMPLEMENT_THFILE_STORAGE(Long, long) -IMPLEMENT_THFILE_STORAGE(Float, float) -IMPLEMENT_THFILE_STORAGE(Double, double) -IMPLEMENT_THFILE_STORAGE(Half, THHalf) diff --git a/contrib/lua-torch/torch7/lib/TH/THFile.h b/contrib/lua-torch/torch7/lib/TH/THFile.h deleted file mode 100644 index e097bdf343..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THFile.h +++ /dev/null @@ -1,91 +0,0 @@ -#ifndef TH_FILE_INC -#define TH_FILE_INC - -#include "THStorage.h" - -typedef struct THFile__ THFile; - -TH_API int THFile_isOpened(THFile *self); -TH_API int THFile_isQuiet(THFile *self); -TH_API int THFile_isReadable(THFile *self); -TH_API int THFile_isWritable(THFile *self); -TH_API int THFile_isBinary(THFile *self); -TH_API int THFile_isAutoSpacing(THFile *self); -TH_API int THFile_hasError(THFile *self); - -TH_API void THFile_binary(THFile *self); -TH_API void THFile_ascii(THFile *self); -TH_API void THFile_autoSpacing(THFile *self); -TH_API void THFile_noAutoSpacing(THFile *self); -TH_API void THFile_quiet(THFile *self); -TH_API void THFile_pedantic(THFile *self); -TH_API void THFile_clearError(THFile *self); - -/* scalar */ -TH_API unsigned char THFile_readByteScalar(THFile *self); -TH_API char THFile_readCharScalar(THFile *self); -TH_API short THFile_readShortScalar(THFile *self); -TH_API int THFile_readIntScalar(THFile *self); -TH_API long THFile_readLongScalar(THFile *self); -TH_API float THFile_readFloatScalar(THFile *self); -TH_API double THFile_readDoubleScalar(THFile *self); - -TH_API void THFile_writeByteScalar(THFile *self, unsigned char scalar); -TH_API void THFile_writeCharScalar(THFile *self, char scalar); -TH_API void THFile_writeShortScalar(THFile *self, short scalar); -TH_API void THFile_writeIntScalar(THFile *self, int scalar); -TH_API void THFile_writeLongScalar(THFile *self, long scalar); -TH_API void THFile_writeFloatScalar(THFile *self, float scalar); -TH_API void THFile_writeDoubleScalar(THFile *self, double scalar); - -/* storage */ -TH_API size_t THFile_readByte(THFile *self, THByteStorage 
*storage); -TH_API size_t THFile_readChar(THFile *self, THCharStorage *storage); -TH_API size_t THFile_readShort(THFile *self, THShortStorage *storage); -TH_API size_t THFile_readInt(THFile *self, THIntStorage *storage); -TH_API size_t THFile_readLong(THFile *self, THLongStorage *storage); -TH_API size_t THFile_readFloat(THFile *self, THFloatStorage *storage); -TH_API size_t THFile_readDouble(THFile *self, THDoubleStorage *storage); - -TH_API size_t THFile_writeByte(THFile *self, THByteStorage *storage); -TH_API size_t THFile_writeChar(THFile *self, THCharStorage *storage); -TH_API size_t THFile_writeShort(THFile *self, THShortStorage *storage); -TH_API size_t THFile_writeInt(THFile *self, THIntStorage *storage); -TH_API size_t THFile_writeLong(THFile *self, THLongStorage *storage); -TH_API size_t THFile_writeFloat(THFile *self, THFloatStorage *storage); -TH_API size_t THFile_writeDouble(THFile *self, THDoubleStorage *storage); - -/* raw */ -TH_API size_t THFile_readByteRaw(THFile *self, unsigned char *data, size_t n); -TH_API size_t THFile_readCharRaw(THFile *self, char *data, size_t n); -TH_API size_t THFile_readShortRaw(THFile *self, short *data, size_t n); -TH_API size_t THFile_readIntRaw(THFile *self, int *data, size_t n); -TH_API size_t THFile_readLongRaw(THFile *self, long *data, size_t n); -TH_API size_t THFile_readFloatRaw(THFile *self, float *data, size_t n); -TH_API size_t THFile_readDoubleRaw(THFile *self, double *data, size_t n); -TH_API size_t THFile_readStringRaw(THFile *self, const char *format, char **str_); /* you must deallocate str_ */ - -TH_API size_t THFile_writeByteRaw(THFile *self, unsigned char *data, size_t n); -TH_API size_t THFile_writeCharRaw(THFile *self, char *data, size_t n); -TH_API size_t THFile_writeShortRaw(THFile *self, short *data, size_t n); -TH_API size_t THFile_writeIntRaw(THFile *self, int *data, size_t n); -TH_API size_t THFile_writeLongRaw(THFile *self, long *data, size_t n); -TH_API size_t THFile_writeFloatRaw(THFile *self, float *data, size_t n); -TH_API size_t THFile_writeDoubleRaw(THFile *self, double *data, size_t n); -TH_API size_t THFile_writeStringRaw(THFile *self, const char *str, size_t size); - -TH_API THHalf THFile_readHalfScalar(THFile *self); -TH_API void THFile_writeHalfScalar(THFile *self, THHalf scalar); -TH_API size_t THFile_readHalf(THFile *self, THHalfStorage *storage); -TH_API size_t THFile_writeHalf(THFile *self, THHalfStorage *storage); -TH_API size_t THFile_readHalfRaw(THFile *self, THHalf* data, size_t size); -TH_API size_t THFile_writeHalfRaw(THFile *self, THHalf* data, size_t size); - -TH_API void THFile_synchronize(THFile *self); -TH_API void THFile_seek(THFile *self, size_t position); -TH_API void THFile_seekEnd(THFile *self); -TH_API size_t THFile_position(THFile *self); -TH_API void THFile_close(THFile *self); -TH_API void THFile_free(THFile *self); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THFilePrivate.h b/contrib/lua-torch/torch7/lib/TH/THFilePrivate.h deleted file mode 100644 index 55169c3bc7..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THFilePrivate.h +++ /dev/null @@ -1,50 +0,0 @@ -#include "THGeneral.h" - -#include "THHalf.h" - - -struct THFile__ -{ - struct THFileVTable *vtable; - - int isQuiet; - int isReadable; - int isWritable; - int isBinary; - int isAutoSpacing; - int hasError; -}; - -/* virtual table definition */ - -struct THFileVTable -{ - int (*isOpened)(THFile *self); - - size_t (*readByte)(THFile *self, unsigned char *data, size_t n); - size_t (*readChar)(THFile *self, char 
*data, size_t n);
-  size_t (*readShort)(THFile *self, short *data, size_t n);
-  size_t (*readInt)(THFile *self, int *data, size_t n);
-  size_t (*readLong)(THFile *self, long *data, size_t n);
-  size_t (*readFloat)(THFile *self, float *data, size_t n);
-  size_t (*readDouble)(THFile *self, double *data, size_t n);
-  size_t (*readHalf)(THFile *self, THHalf *data, size_t n);
-  size_t (*readString)(THFile *self, const char *format, char **str_);
-
-  size_t (*writeByte)(THFile *self, unsigned char *data, size_t n);
-  size_t (*writeChar)(THFile *self, char *data, size_t n);
-  size_t (*writeShort)(THFile *self, short *data, size_t n);
-  size_t (*writeInt)(THFile *self, int *data, size_t n);
-  size_t (*writeLong)(THFile *self, long *data, size_t n);
-  size_t (*writeFloat)(THFile *self, float *data, size_t n);
-  size_t (*writeDouble)(THFile *self, double *data, size_t n);
-  size_t (*writeHalf)(THFile *self, THHalf *data, size_t n);
-  size_t (*writeString)(THFile *self, const char *str, size_t size);
-
-  void (*synchronize)(THFile *self);
-  void (*seek)(THFile *self, size_t position);
-  void (*seekEnd)(THFile *self);
-  size_t (*position)(THFile *self);
-  void (*close)(THFile *self);
-  void (*free)(THFile *self);
-};
diff --git a/contrib/lua-torch/torch7/lib/TH/THGeneral.c b/contrib/lua-torch/torch7/lib/TH/THGeneral.c
deleted file mode 100644
index f093c422f1..0000000000
--- a/contrib/lua-torch/torch7/lib/TH/THGeneral.c
+++ /dev/null
@@ -1,406 +0,0 @@
-#include "THGeneral.h"
-#include "THAtomic.h"
-
-#ifdef _OPENMP
-#include <omp.h>
-#endif
-
-#ifndef TH_HAVE_THREAD
-#define __thread
-#elif _MSC_VER
-#define __thread __declspec( thread )
-#endif
-
-#if defined(__APPLE__)
-#include <malloc/malloc.h>
-#endif
-
-#if defined(__linux__)
-#include <malloc.h>
-#endif
-
-#if defined(__FreeBSD__)
-#include <malloc_np.h>
-#endif
-
-/* Torch Error Handling */
-static void defaultErrorHandlerFunction(const char *msg, void *data)
-{
-  printf("$ Error: %s\n", msg);
-  abort();
-}
-
-static THErrorHandlerFunction defaultErrorHandler = defaultErrorHandlerFunction;
-static void *defaultErrorHandlerData;
-static __thread THErrorHandlerFunction threadErrorHandler = NULL;
-static __thread void *threadErrorHandlerData;
-
-void _THError(const char *file, const int line, const char *fmt, ...)
-{
-  char msg[2048];
-  va_list args;
-
-  /* vasprintf not standard */
-  /* vsnprintf: how to handle if does not exists? */
-  va_start(args, fmt);
-  int n = vsnprintf(msg, 2048, fmt, args);
-  va_end(args);
-
-  if(n < 2048) {
-    snprintf(msg + n, 2048 - n, " at %s:%d", file, line);
-  }
-
-  if (threadErrorHandler)
-    (*threadErrorHandler)(msg, threadErrorHandlerData);
-  else
-    (*defaultErrorHandler)(msg, defaultErrorHandlerData);
-}
%s", exp, msg); -} - -void THSetErrorHandler(THErrorHandlerFunction new_handler, void *data) -{ - threadErrorHandler = new_handler; - threadErrorHandlerData = data; -} - -void THSetDefaultErrorHandler(THErrorHandlerFunction new_handler, void *data) -{ - if (new_handler) - defaultErrorHandler = new_handler; - else - defaultErrorHandler = defaultErrorHandlerFunction; - defaultErrorHandlerData = data; -} - -/* Torch Arg Checking Handling */ -static void defaultArgErrorHandlerFunction(int argNumber, const char *msg, void *data) -{ - if(msg) - printf("$ Invalid argument %d: %s\n", argNumber, msg); - else - printf("$ Invalid argument %d\n", argNumber); - exit(-1); -} - -static THArgErrorHandlerFunction defaultArgErrorHandler = defaultArgErrorHandlerFunction; -static void *defaultArgErrorHandlerData; -static __thread THArgErrorHandlerFunction threadArgErrorHandler = NULL; -static __thread void *threadArgErrorHandlerData; - -void _THArgCheck(const char *file, int line, int condition, int argNumber, const char *fmt, ...) -{ - if(!condition) { - char msg[2048]; - va_list args; - - /* vasprintf not standard */ - /* vsnprintf: how to handle if does not exists? */ - va_start(args, fmt); - int n = vsnprintf(msg, 2048, fmt, args); - va_end(args); - - if(n < 2048) { - snprintf(msg + n, 2048 - n, " at %s:%d", file, line); - } - - if (threadArgErrorHandler) - (*threadArgErrorHandler)(argNumber, msg, threadArgErrorHandlerData); - else - (*defaultArgErrorHandler)(argNumber, msg, defaultArgErrorHandlerData); - } -} - -void THSetArgErrorHandler(THArgErrorHandlerFunction new_handler, void *data) -{ - threadArgErrorHandler = new_handler; - threadArgErrorHandlerData = data; -} - -void THSetDefaultArgErrorHandler(THArgErrorHandlerFunction new_handler, void *data) -{ - if (new_handler) - defaultArgErrorHandler = new_handler; - else - defaultArgErrorHandler = defaultArgErrorHandlerFunction; - defaultArgErrorHandlerData = data; -} - -static __thread void (*torchGCFunction)(void *data) = NULL; -static __thread void *torchGCData; -static ptrdiff_t heapSize = 0; -static __thread ptrdiff_t heapDelta = 0; -static const ptrdiff_t heapMaxDelta = (ptrdiff_t)1e6; // limit to +/- 1MB before updating heapSize -static const ptrdiff_t heapMinDelta = (ptrdiff_t)-1e6; -static __thread ptrdiff_t heapSoftmax = (ptrdiff_t)3e8; // 300MB, adjusted upward dynamically -static const double heapSoftmaxGrowthThresh = 0.8; // grow softmax if >80% max after GC -static const double heapSoftmaxGrowthFactor = 1.4; // grow softmax by 40% - -/* Optional hook for integrating with a garbage-collected frontend. - * - * If torch is running with a garbage-collected frontend (e.g. Lua), - * the GC isn't aware of TH-allocated memory so may not know when it - * needs to run. These hooks trigger the GC to run in two cases: - * - * (1) When a memory allocation (malloc, realloc, ...) fails - * (2) When the total TH-allocated memory hits a dynamically-adjusted - * soft maximum. 
-
-/* it is guaranteed the allocated size is not bigger than PTRDIFF_MAX */
-static ptrdiff_t getAllocSize(void *ptr) {
-#if defined(__unix) && defined(HAVE_MALLOC_USABLE_SIZE)
-  return malloc_usable_size(ptr);
-#elif defined(__APPLE__)
-  return malloc_size(ptr);
-#elif defined(_WIN32)
-  if(ptr) { return _msize(ptr); } else { return 0; }
-#else
-  return 0;
-#endif
-}
-
-static ptrdiff_t applyHeapDelta() {
-  ptrdiff_t oldHeapSize = THAtomicAddPtrdiff(&heapSize, heapDelta);
-#ifdef DEBUG
-  if (heapDelta > 0 && oldHeapSize > PTRDIFF_MAX - heapDelta)
-    THError("applyHeapDelta: heapSize(%td) + increased(%td) > PTRDIFF_MAX, heapSize overflow!", oldHeapSize, heapDelta);
-  if (heapDelta < 0 && oldHeapSize < PTRDIFF_MIN - heapDelta)
-    THError("applyHeapDelta: heapSize(%td) + decreased(%td) < PTRDIFF_MIN, heapSize underflow!", oldHeapSize, heapDelta);
-#endif
-  ptrdiff_t newHeapSize = oldHeapSize + heapDelta;
-  heapDelta = 0;
-  return newHeapSize;
-}
-
-/* (1) if the torch-allocated heap size exceeds the soft max, run GC
- * (2) if post-GC heap size exceeds 80% of the soft max, increase the
- *     soft max by 40%
- */
-static void maybeTriggerGC(ptrdiff_t curHeapSize) {
-  if (torchGCFunction && curHeapSize > heapSoftmax) {
-    torchGCFunction(torchGCData);
-
-    // ensure heapSize is accurate before updating heapSoftmax
-    ptrdiff_t newHeapSize = applyHeapDelta();
-
-    if (newHeapSize > heapSoftmax * heapSoftmaxGrowthThresh) {
-      heapSoftmax = (ptrdiff_t)(heapSoftmax * heapSoftmaxGrowthFactor);
-    }
-  }
-}
-
-// hooks into the TH heap tracking
-void THHeapUpdate(ptrdiff_t size) {
-#ifdef DEBUG
-  if (size > 0 && heapDelta > PTRDIFF_MAX - size)
-    THError("THHeapUpdate: heapDelta(%td) + increased(%td) > PTRDIFF_MAX, heapDelta overflow!", heapDelta, size);
-  if (size < 0 && heapDelta < PTRDIFF_MIN - size)
-    THError("THHeapUpdate: heapDelta(%td) + decreased(%td) < PTRDIFF_MIN, heapDelta underflow!", heapDelta, size);
-#endif
-
-  heapDelta += size;
-
-  // batch updates to global heapSize to minimize thread contention
-  if (heapDelta < heapMaxDelta && heapDelta > heapMinDelta) {
-    return;
-  }
-
-  ptrdiff_t newHeapSize = applyHeapDelta();
-
-  if (size > 0) {
-    maybeTriggerGC(newHeapSize);
-  }
-}
-
-static void* THAllocInternal(ptrdiff_t size)
-{
-  void *ptr;
-
-  if (size > 5120)
-  {
-#if (defined(__unix) || defined(__APPLE__)) && (!defined(DISABLE_POSIX_MEMALIGN))
-    if (posix_memalign(&ptr, 64, size) != 0)
-      ptr = NULL;
-/*
-#elif defined(_WIN32)
-    ptr = _aligned_malloc(size, 64);
-*/
-#else
-    ptr = malloc(size);
-#endif
-  }
-  else
-  {
-    ptr = malloc(size);
-  }
-
-  THHeapUpdate(getAllocSize(ptr));
-  return ptr;
-}
-
-void* THAlloc(ptrdiff_t size)
-{
-  void *ptr;
-
-  if(size < 0)
-    THError("$ Torch: invalid memory size -- maybe an overflow?");
-
-  if(size == 0)
-    return NULL;
-
-  ptr = THAllocInternal(size);
-
-  if(!ptr && torchGCFunction) {
-    torchGCFunction(torchGCData);
-    ptr = THAllocInternal(size);
-  }
-
-  if(!ptr)
-    THError("$ Torch: not enough memory: you tried to allocate %dGB. Buy new RAM!", size/1073741824);
-
-  return ptr;
-}
Buy new RAM!", size/1073741824); - - return ptr; -} - -void* THRealloc(void *ptr, ptrdiff_t size) -{ - if(!ptr) - return(THAlloc(size)); - - if(size == 0) - { - THFree(ptr); - return NULL; - } - - if(size < 0) - THError("$ Torch: invalid memory size -- maybe an overflow?"); - - ptrdiff_t oldSize = -getAllocSize(ptr); - void *newptr = realloc(ptr, size); - - if(!newptr && torchGCFunction) { - torchGCFunction(torchGCData); - newptr = realloc(ptr, size); - } - - if(!newptr) - THError("$ Torch: not enough memory: you tried to reallocate %dGB. Buy new RAM!", size/1073741824); - - // update heapSize only after successfully reallocated - THHeapUpdate(oldSize + getAllocSize(newptr)); - - return newptr; -} - -void THFree(void *ptr) -{ - THHeapUpdate(-getAllocSize(ptr)); - free(ptr); -} - -double THLog1p(const double x) -{ -#if (defined(_MSC_VER) || defined(__MINGW32__)) - volatile double y = 1 + x; - return log(y) - ((y-1)-x)/y ; /* cancels errors with IEEE arithmetic */ -#else - return log1p(x); -#endif -} - -void THSetNumThreads(int num_threads) -{ -#ifdef _OPENMP - omp_set_num_threads(num_threads); -#endif -#ifdef TH_BLAS_OPEN - extern void openblas_set_num_threads(int); - openblas_set_num_threads(num_threads); -#endif -#ifdef TH_BLAS_MKL - extern void mkl_set_num_threads(int); - mkl_set_num_threads(num_threads); - -#endif -} - -int THGetNumThreads(void) -{ - int nthreads = 1; -#ifdef _OPENMP - nthreads = omp_get_max_threads(); -#endif -#ifdef TH_BLAS_OPEN - int bl_threads = 1; - extern int openblas_get_num_threads(void); - bl_threads = openblas_get_num_threads(); - nthreads = nthreads > bl_threads ? bl_threads : nthreads; -#endif -#ifdef TH_BLAS_MKL - int bl_threads = 1; - extern int mkl_get_max_threads(void); - bl_threads = mkl_get_max_threads(); - nthreads = nthreads > bl_threads ? bl_threads : nthreads; -#endif - return nthreads; -} - -int THGetNumCores(void) -{ -#ifdef _OPENMP - return omp_get_num_procs(); -#else - return 1; -#endif -} - -#ifdef TH_BLAS_MKL -extern int mkl_get_max_threads(void); -#endif - -TH_API void THInferNumThreads(void) -{ -#if defined(_OPENMP) && defined(TH_BLAS_MKL) - // If we are using MKL an OpenMP make sure the number of threads match. 
-
-#ifdef TH_BLAS_MKL
-extern int mkl_get_max_threads(void);
-#endif
-
-TH_API void THInferNumThreads(void)
-{
-#if defined(_OPENMP) && defined(TH_BLAS_MKL)
-  // If we are using MKL and OpenMP make sure the number of threads matches.
-  // Otherwise, MKL and our OpenMP-enabled functions will keep changing the
-  // size of the OpenMP thread pool, resulting in worse performance (and memory
-  // leaks in GCC 5.4)
-  omp_set_num_threads(mkl_get_max_threads());
-#endif
-}
-
-TH_API THDescBuff _THSizeDesc(const long *size, const long ndim) {
-  const int L = TH_DESC_BUFF_LEN;
-  THDescBuff buf;
-  char *str = buf.str;
-  int n = 0;
-  n += snprintf(str, L-n, "[");
-  int i;
-  for(i = 0; i < ndim; i++) {
-    if(n >= L) break;
-    n += snprintf(str+n, L-n, "%ld", size[i]);
-    if(i < ndim-1) {
-      n += snprintf(str+n, L-n, " x ");
-    }
-  }
-  if(n < L - 2) {
-    snprintf(str+n, L-n, "]");
-  } else {
-    snprintf(str+L-5, 5, "...]");
-  }
-  return buf;
-}
-
diff --git a/contrib/lua-torch/torch7/lib/TH/THGeneral.h.in b/contrib/lua-torch/torch7/lib/TH/THGeneral.h.in
deleted file mode 100644
index 88a3934c8c..0000000000
--- a/contrib/lua-torch/torch7/lib/TH/THGeneral.h.in
+++ /dev/null
@@ -1,130 +0,0 @@
-#ifndef TH_GENERAL_INC
-#define TH_GENERAL_INC
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <stdarg.h>
-#include <stddef.h>
-#include <math.h>
-#include <limits.h>
-#include <float.h>
-#include <time.h>
-#include <string.h>
-
-#cmakedefine USE_BLAS
-#cmakedefine USE_LAPACK
-#cmakedefine BLAS_F2C
-
-#ifdef __cplusplus
-# define TH_EXTERNC extern "C"
-#else
-# define TH_EXTERNC extern
-#endif
-
-#ifdef _WIN32
-# ifdef TH_EXPORTS
-#  define TH_API TH_EXTERNC __declspec(dllexport)
-# else
-#  define TH_API TH_EXTERNC __declspec(dllimport)
-# endif
-#else
-# define TH_API TH_EXTERNC
-#endif
-
-#ifndef M_PI
-# define M_PI 3.14159265358979323846
-#endif
-
-#ifndef TH_INDEX_BASE
-#define TH_INDEX_BASE 1
-#endif
-
-typedef void (*THErrorHandlerFunction)(const char *msg, void *data);
-typedef void (*THArgErrorHandlerFunction)(int argNumber, const char *msg, void *data);
-
-#define TH_DESC_BUFF_LEN 64
-typedef struct {
-  char str[TH_DESC_BUFF_LEN];
-} THDescBuff;
-
-
-TH_API double THLog1p(const double x);
-TH_API THDescBuff _THSizeDesc(const long *size, const long ndim);
-TH_API void _THError(const char *file, const int line, const char *fmt, ...);
-TH_API void _THAssertionFailed(const char *file, const int line, const char *exp, const char *fmt, ...);
-TH_API void THSetErrorHandler(THErrorHandlerFunction new_handler, void *data);
-TH_API void THSetDefaultErrorHandler(THErrorHandlerFunction new_handler, void *data);
-TH_API void _THArgCheck(const char *file, int line, int condition, int argNumber, const char *fmt, ...);
-TH_API void THSetArgErrorHandler(THArgErrorHandlerFunction new_handler, void *data);
-TH_API void THSetDefaultArgErrorHandler(THArgErrorHandlerFunction new_handler, void *data);
-TH_API void* THAlloc(ptrdiff_t size);
-TH_API void* THRealloc(void *ptr, ptrdiff_t size);
-TH_API void THFree(void *ptr);
-TH_API void THSetGCHandler( void (*torchGCHandlerFunction)(void *data), void *data );
-// this hook should only be called by custom allocator functions
-TH_API void THHeapUpdate(ptrdiff_t size);
-TH_API void THSetNumThreads(int num_threads);
-TH_API int THGetNumThreads(void);
-TH_API int THGetNumCores(void);
-TH_API void THInferNumThreads(void);
-
-#define THError(...) _THError(__FILE__, __LINE__, __VA_ARGS__)
-
-#define THCleanup(...) __VA_ARGS__
-
-#define THArgCheck(...)                                 \
-do {                                                    \
-  _THArgCheck(__FILE__, __LINE__, __VA_ARGS__);         \
-} while(0)
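Because THError and THArgCheck splice __FILE__/__LINE__ into the message, callers never pass them explicitly. A minimal sketch of a TH-style entry point using these macros (the function itself is hypothetical, not part of TH):

    /* hypothetical example, not from this tree */
    void THExample_resize(void *buf, ptrdiff_t n)
    {
      THArgCheck(buf != NULL, 1, "buffer is NULL");
      THArgCheck(n >= 0, 2, "invalid size %td -- maybe an overflow?", n);
      if (n > ((ptrdiff_t)1 << 40))
        THError("refusing to allocate %td bytes", n);  /* reported as "... at file.c:LINE" */
      /* ... resize logic ... */
    }
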
-#define THArgCheckWithCleanup(condition, cleanup, ...)  \
-do if (!(condition)) {                                  \
-  cleanup                                               \
-  _THArgCheck(__FILE__, __LINE__, 0, __VA_ARGS__);      \
-} while(0)
-
-#define THAssert(exp)                                   \
-do {                                                    \
-  if (!(exp)) {                                         \
-    _THAssertionFailed(__FILE__, __LINE__, #exp, "");   \
-  }                                                     \
-} while(0)
-
-#define THAssertMsg(exp, ...)                                   \
-do {                                                            \
-  if (!(exp)) {                                                 \
-    _THAssertionFailed(__FILE__, __LINE__, #exp, __VA_ARGS__);  \
-  }                                                             \
-} while(0)
-
-#define TH_CONCAT_STRING_2(x,y) TH_CONCAT_STRING_2_EXPAND(x,y)
-#define TH_CONCAT_STRING_2_EXPAND(x,y) #x #y
-
-#define TH_CONCAT_STRING_3(x,y,z) TH_CONCAT_STRING_3_EXPAND(x,y,z)
-#define TH_CONCAT_STRING_3_EXPAND(x,y,z) #x #y #z
-
-#define TH_CONCAT_STRING_4(x,y,z,w) TH_CONCAT_STRING_4_EXPAND(x,y,z,w)
-#define TH_CONCAT_STRING_4_EXPAND(x,y,z,w) #x #y #z #w
-
-#define TH_CONCAT_2(x,y) TH_CONCAT_2_EXPAND(x,y)
-#define TH_CONCAT_2_EXPAND(x,y) x ## y
-
-#define TH_CONCAT_3(x,y,z) TH_CONCAT_3_EXPAND(x,y,z)
-#define TH_CONCAT_3_EXPAND(x,y,z) x ## y ## z
-
-#define TH_CONCAT_4_EXPAND(x,y,z,w) x ## y ## z ## w
-#define TH_CONCAT_4(x,y,z,w) TH_CONCAT_4_EXPAND(x,y,z,w)
-
-#define THMin(X, Y)  ((X) < (Y) ? (X) : (Y))
-#define THMax(X, Y)  ((X) > (Y) ? (X) : (Y))
-
-#if (defined(_MSC_VER) || defined(__MINGW32__))
-# define log1p(x) THLog1p(x)
-#define snprintf _snprintf
-#define popen _popen
-#define pclose _pclose
-#include <BaseTsd.h>
-typedef SSIZE_T ssize_t;
-#endif
-
-#endif
diff --git a/contrib/lua-torch/torch7/lib/TH/THGenerateAllTypes.h b/contrib/lua-torch/torch7/lib/TH/THGenerateAllTypes.h
deleted file mode 100644
index 5b9508df76..0000000000
--- a/contrib/lua-torch/torch7/lib/TH/THGenerateAllTypes.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#error "You must define TH_GENERIC_FILE before including THGenerateAllTypes.h"
-#endif
-
-#ifndef THGenerateManyTypes
-#define THAllLocalGenerateManyTypes
-#define THGenerateManyTypes
-#endif
-
-#include "THGenerateFloatTypes.h"
-#include "THGenerateIntTypes.h"
-
-#ifdef THAllLocalGenerateManyTypes
-#undef THAllLocalGenerateManyTypes
-#undef THGenerateManyTypes
-#undef TH_GENERIC_FILE
-#endif
diff --git a/contrib/lua-torch/torch7/lib/TH/THGenerateByteType.h b/contrib/lua-torch/torch7/lib/TH/THGenerateByteType.h
deleted file mode 100644
index 71ce7c405c..0000000000
--- a/contrib/lua-torch/torch7/lib/TH/THGenerateByteType.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#error "You must define TH_GENERIC_FILE before including THGenerateByteType.h"
-#endif
-
-#define real unsigned char
-#define accreal long
-#define Real Byte
-#define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val)
-#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val)
-#define THInf UCHAR_MAX
-#define TH_REAL_IS_BYTE
-#line 1 TH_GENERIC_FILE
-#include TH_GENERIC_FILE
-#undef real
-#undef accreal
-#undef Real
-#undef THInf
-#undef TH_REAL_IS_BYTE
-#undef TH_CONVERT_REAL_TO_ACCREAL
-#undef TH_CONVERT_ACCREAL_TO_REAL
-
-#ifndef THGenerateManyTypes
-#undef TH_GENERIC_FILE
-#endif
diff --git a/contrib/lua-torch/torch7/lib/TH/THGenerateCharType.h b/contrib/lua-torch/torch7/lib/TH/THGenerateCharType.h
deleted file mode 100644
index 158dd0e803..0000000000
--- a/contrib/lua-torch/torch7/lib/TH/THGenerateCharType.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#error "You must define TH_GENERIC_FILE before including THGenerateCharType.h"
-#endif
-
-#define real char
-#define accreal long
-#define Real Char
-#define THInf CHAR_MAX
-#define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val)
-#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val)
-#define TH_REAL_IS_CHAR
-#line 1 TH_GENERIC_FILE
-#include
TH_GENERIC_FILE -#undef real -#undef accreal -#undef Real -#undef THInf -#undef TH_REAL_IS_CHAR -#undef TH_CONVERT_REAL_TO_ACCREAL -#undef TH_CONVERT_ACCREAL_TO_REAL - -#ifndef THGenerateManyTypes -#undef TH_GENERIC_FILE -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THGenerateDoubleType.h b/contrib/lua-torch/torch7/lib/TH/THGenerateDoubleType.h deleted file mode 100644 index fffee606da..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THGenerateDoubleType.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef TH_GENERIC_FILE -#error "You must define TH_GENERIC_FILE before including THGenerateDoubleType.h" -#endif - -#define real double -#define accreal double -#define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) -#define Real Double -#define THInf DBL_MAX -#define TH_REAL_IS_DOUBLE -#line 1 TH_GENERIC_FILE -#include TH_GENERIC_FILE -#undef accreal -#undef real -#undef Real -#undef THInf -#undef TH_REAL_IS_DOUBLE -#undef TH_CONVERT_REAL_TO_ACCREAL -#undef TH_CONVERT_ACCREAL_TO_REAL - -#ifndef THGenerateManyTypes -#undef TH_GENERIC_FILE -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THGenerateFloatType.h b/contrib/lua-torch/torch7/lib/TH/THGenerateFloatType.h deleted file mode 100644 index a31b50c55c..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THGenerateFloatType.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef TH_GENERIC_FILE -#error "You must define TH_GENERIC_FILE before including THGenerateFloatType.h" -#endif - -#define real float -#define accreal double -#define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) -#define Real Float -#define THInf FLT_MAX -#define TH_REAL_IS_FLOAT -#line 1 TH_GENERIC_FILE -#include TH_GENERIC_FILE -#undef accreal -#undef real -#undef Real -#undef THInf -#undef TH_REAL_IS_FLOAT -#undef TH_CONVERT_REAL_TO_ACCREAL -#undef TH_CONVERT_ACCREAL_TO_REAL - -#ifndef THGenerateManyTypes -#undef TH_GENERIC_FILE -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THGenerateFloatTypes.h b/contrib/lua-torch/torch7/lib/TH/THGenerateFloatTypes.h deleted file mode 100644 index be5ea8403a..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THGenerateFloatTypes.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef TH_GENERIC_FILE -#error "You must define TH_GENERIC_FILE before including THGenerateFloatTypes.h" -#endif - -#ifndef THGenerateManyTypes -#define THFloatLocalGenerateManyTypes -#define THGenerateManyTypes -#endif - -#include "THGenerateFloatType.h" -#include "THGenerateDoubleType.h" - -#ifdef THFloatLocalGenerateManyTypes -#undef THFloatLocalGenerateManyTypes -#undef THGenerateManyTypes -#undef TH_GENERIC_FILE -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THGenerateHalfType.h b/contrib/lua-torch/torch7/lib/TH/THGenerateHalfType.h deleted file mode 100644 index 47ff1e8d7a..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THGenerateHalfType.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef TH_GENERIC_FILE -#error "You must define TH_GENERIC_FILE before including THGenerateHalfType.h" -#endif - -#include "THHalf.h" -#define real THHalf -#define accreal float -#define TH_CONVERT_REAL_TO_ACCREAL(_val) TH_half2float(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) TH_float2half(_val) -#define Real Half -#define THInf TH_HALF_BITS_TO_LITERAL(TH_HALF_INF) -#define TH_REAL_IS_HALF -#line 1 TH_GENERIC_FILE -#include TH_GENERIC_FILE -#undef real -#undef accreal -#undef Real -#undef THInf -#undef TH_REAL_IS_HALF -#undef TH_CONVERT_REAL_TO_ACCREAL -#undef TH_CONVERT_ACCREAL_TO_REAL 
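The THGenerate*Type.h headers above and below all repeat one X-macro pattern: define the placeholder macros `real`, `accreal` and `Real`, pull in the generic source once via `#include TH_GENERIC_FILE`, then undefine everything so the next type can be instantiated. A minimal self-contained sketch of the trick, using the same two-level TH_CONCAT_2 expansion that THGeneral.h.in defines (the `twice_` function is illustrative, not part of TH):

```c
#include <stdio.h>

#define TH_CONCAT_2_EXPAND(x,y) x ## y
#define TH_CONCAT_2(x,y) TH_CONCAT_2_EXPAND(x,y)   /* expands args before pasting */

/* the "generic file": written once against the placeholders real/Real */
#define GENERIC_BODY \
  static real TH_CONCAT_2(twice_, Real)(real x) { return x + x; }

#define real float
#define Real Float
GENERIC_BODY   /* instantiates: static float twice_Float(float) */
#undef real
#undef Real

#define real double
#define Real Double
GENERIC_BODY   /* instantiates: static double twice_Double(double) */
#undef real
#undef Real

int main(void) {
  printf("%g %g\n", twice_Float(1.5f), twice_Double(2.25));
  return 0;
}
```

The two-level concat is the load-bearing part: `Real` must expand to `Float` before `##` pastes the tokens, which is why THGeneral.h.in defines every TH_CONCAT_* macro as an EXPAND pair.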
- -#ifndef THGenerateManyTypes -#undef TH_GENERIC_FILE -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THGenerateIntType.h b/contrib/lua-torch/torch7/lib/TH/THGenerateIntType.h deleted file mode 100644 index 1562b9e986..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THGenerateIntType.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef TH_GENERIC_FILE -#error "You must define TH_GENERIC_FILE before including THGenerateIntType.h" -#endif - -#define real int -#define accreal long -#define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) -#define Real Int -#define THInf INT_MAX -#define TH_REAL_IS_INT -#line 1 TH_GENERIC_FILE -#include TH_GENERIC_FILE -#undef real -#undef accreal -#undef Real -#undef THInf -#undef TH_REAL_IS_INT -#undef TH_CONVERT_REAL_TO_ACCREAL -#undef TH_CONVERT_ACCREAL_TO_REAL - -#ifndef THGenerateManyTypes -#undef TH_GENERIC_FILE -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THGenerateIntTypes.h b/contrib/lua-torch/torch7/lib/TH/THGenerateIntTypes.h deleted file mode 100644 index 9931fb1f59..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THGenerateIntTypes.h +++ /dev/null @@ -1,20 +0,0 @@ -#ifndef TH_GENERIC_FILE -#error "You must define TH_GENERIC_FILE before including THGenerateIntTypes.h" -#endif - -#ifndef THGenerateManyTypes -#define THIntLocalGenerateManyTypes -#define THGenerateManyTypes -#endif - -#include "THGenerateByteType.h" -#include "THGenerateCharType.h" -#include "THGenerateShortType.h" -#include "THGenerateIntType.h" -#include "THGenerateLongType.h" - -#ifdef THIntLocalGenerateManyTypes -#undef THIntLocalGenerateManyTypes -#undef THGenerateManyTypes -#undef TH_GENERIC_FILE -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THGenerateLongType.h b/contrib/lua-torch/torch7/lib/TH/THGenerateLongType.h deleted file mode 100644 index 75f90e1a60..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THGenerateLongType.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef TH_GENERIC_FILE -#error "You must define TH_GENERIC_FILE before including THGenerateLongType.h" -#endif - -#define real long -#define accreal long -#define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) -#define Real Long -#define THInf LONG_MAX -#define TH_REAL_IS_LONG -#line 1 TH_GENERIC_FILE -#include TH_GENERIC_FILE -#undef real -#undef accreal -#undef Real -#undef THInf -#undef TH_REAL_IS_LONG -#undef TH_CONVERT_REAL_TO_ACCREAL -#undef TH_CONVERT_ACCREAL_TO_REAL - -#ifndef THGenerateManyTypes -#undef TH_GENERIC_FILE -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THGenerateShortType.h b/contrib/lua-torch/torch7/lib/TH/THGenerateShortType.h deleted file mode 100644 index 047e51a8d7..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THGenerateShortType.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef TH_GENERIC_FILE -#error "You must define TH_GENERIC_FILE before including THGenerateShortType.h" -#endif - -#define real short -#define accreal long -#define TH_CONVERT_REAL_TO_ACCREAL(_val) (accreal)(_val) -#define TH_CONVERT_ACCREAL_TO_REAL(_val) (real)(_val) -#define Real Short -#define THInf SHRT_MAX -#define TH_REAL_IS_SHORT -#line 1 TH_GENERIC_FILE -#include TH_GENERIC_FILE -#undef real -#undef accreal -#undef Real -#undef THInf -#undef TH_REAL_IS_SHORT -#undef TH_CONVERT_REAL_TO_ACCREAL -#undef TH_CONVERT_ACCREAL_TO_REAL - -#ifndef THGenerateManyTypes -#undef TH_GENERIC_FILE -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THHalf.c b/contrib/lua-torch/torch7/lib/TH/THHalf.c deleted file 
mode 100644 index d7468ac3d2..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THHalf.c +++ /dev/null @@ -1,100 +0,0 @@ -#include "THHalf.h" - -/* Copyright 1993-2014 NVIDIA Corporation. All rights reserved. */ - -THHalf TH_float2half(float f) -{ - THHalf h; - TH_float2halfbits(&f, &h.x); - return h; -} - -TH_API float TH_half2float(THHalf h) -{ - float f; - TH_halfbits2float(&h.x, &f); - return f; -} - -// Host functions for converting between FP32 and FP16 formats - -void TH_halfbits2float(unsigned short* src, float* res) -{ - unsigned h = *src; - unsigned sign = ((h >> 15) & 1); - unsigned exponent = ((h >> 10) & 0x1f); - unsigned mantissa = ((h & 0x3ff) << 13); - - if (exponent == 0x1f) { /* NaN or Inf */ - mantissa = (mantissa ? (sign = 0, 0x7fffff) : 0); - exponent = 0xff; - } else if (!exponent) { /* Denorm or Zero */ - if (mantissa) { - unsigned int msb; - exponent = 0x71; - do { - msb = (mantissa & 0x400000); - mantissa <<= 1; /* normalize */ - --exponent; - } while (!msb); - mantissa &= 0x7fffff; /* 1.mantissa is implicit */ - } - } else { - exponent += 0x70; - } - - *(unsigned*)res = ((sign << 31) | (exponent << 23) | mantissa); -} - -void TH_float2halfbits(float* src, unsigned short* dest) -{ - unsigned x = *(unsigned*)src; - unsigned u = (x & 0x7fffffff), remainder, shift, lsb, lsb_s1, lsb_m1; - unsigned sign, exponent, mantissa; - - // Get rid of +NaN/-NaN case first. - if (u > 0x7f800000) { - *dest = 0x7fffU; - return ; - } - - sign = ((x >> 16) & 0x8000); - - // Get rid of +Inf/-Inf, +0/-0. - if (u > 0x477fefff) { - *dest = sign | 0x7c00U; - return; - } - if (u < 0x33000001) { - *dest = (sign | 0x0000); - return; - } - - exponent = ((u >> 23) & 0xff); - mantissa = (u & 0x7fffff); - - if (exponent > 0x70) { - shift = 13; - exponent -= 0x70; - } else { - shift = 0x7e - exponent; - exponent = 0; - mantissa |= 0x800000; - } - lsb = (1 << shift); - lsb_s1 = (lsb >> 1); - lsb_m1 = (lsb - 1); - - // Round to nearest even. 
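The step that follows is a textbook round-to-nearest-even: `remainder` holds the bits shifted out of the mantissa, `lsb_s1` is the half-way point, and ties go to the even mantissa. An illustrative trace plus a round-trip check through the two public helpers (the numeric values are worked out here for 0.1f, not taken from the file):

```c
/* 0.1f has bits 0x3DCCCCCD: exponent 123 > 0x70, so shift = 13,
 * lsb = 0x2000, lsb_s1 = 0x1000, lsb_m1 = 0x1FFF.
 *   mantissa  = 0x4CCCCD
 *   remainder = 0x4CCCCD & 0x1FFF = 0x0CCD  (< lsb_s1, so round down)
 *   mantissa >>= 13  ->  0x266 (614)
 * FP16 result: (11 << 10) | 0x266 = 0x2E66, i.e. ~0.0999755859. */
#include <stdio.h>
#include "THHalf.h"

int main(void) {
  THHalf h = TH_float2half(0.1f);
  printf("0x%04x -> %.9g\n", h.x, TH_half2float(h)); /* 0x2e66 -> 0.0999755859 */
  return 0;
}
```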
- remainder = (mantissa & lsb_m1); - mantissa >>= shift; - if (remainder > lsb_s1 || (remainder == lsb_s1 && (mantissa & 0x1))) { - ++mantissa; - if (!(mantissa & 0x3ff)) { - ++exponent; - mantissa = 0; - } - } - - *dest = (sign | (exponent << 10) | mantissa); -} diff --git a/contrib/lua-torch/torch7/lib/TH/THHalf.h b/contrib/lua-torch/torch7/lib/TH/THHalf.h deleted file mode 100644 index 0f9807b502..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THHalf.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef TH_HALF_H -#define TH_HALF_H - -#include "THGeneral.h" -#include - -/* Neither built-in nor included from Cutorch, use our definition lifted from CUDA */ -#if defined(__GNUC__) -#define __thalign__(n) __attribute__((aligned(n))) -#elif defined(_WIN32) -#define __thalign__(n) __declspec(align(n)) -#else -#define __thalign__(n) -#endif - -typedef struct __thalign__(2){ - unsigned short x; -} __THHalf; - -typedef struct __thalign__(4) { - unsigned int x; -} __THHalf2; - -typedef __THHalf THHalf; -typedef __THHalf2 THHalf2; - -TH_API void TH_float2halfbits(float*, unsigned short*); -TH_API void TH_halfbits2float(unsigned short*, float*); - -TH_API THHalf TH_float2half(float); -TH_API float TH_half2float(THHalf); - -#ifndef TH_HALF_BITS_TO_LITERAL -# define TH_HALF_BITS_TO_LITERAL(n) { n } -#endif - -#define TH_HALF_ZERO 0x0U -#define TH_HALF_INF 0x7C00U - -#undef __thalign__ -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THLapack.c b/contrib/lua-torch/torch7/lib/TH/THLapack.c deleted file mode 100644 index bd4dc716b4..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THLapack.c +++ /dev/null @@ -1,4 +0,0 @@ -#include "THLapack.h" - -#include "generic/THLapack.c" -#include "THGenerateFloatTypes.h" diff --git a/contrib/lua-torch/torch7/lib/TH/THLapack.h b/contrib/lua-torch/torch7/lib/TH/THLapack.h deleted file mode 100644 index 614d15f940..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THLapack.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef TH_LAPACK_INC -#define TH_LAPACK_INC - -#include "THGeneral.h" - -#define THLapack_(NAME) TH_CONCAT_4(TH,Real,Lapack_,NAME) - -#define THLapackCheck(fmt, func, info , ...) \ -if (info < 0) { \ - THError("Lapack Error in %s : Illegal Argument %d", func, -info); \ -} else if(info > 0) { \ - THError(fmt, func, info, ##__VA_ARGS__); \ -} \ - -#define THLapackCheckWithCleanup(fmt, cleanup, func, info , ...) 
\ -if (info < 0) { \ - cleanup \ - THError("Lapack Error in %s : Illegal Argument %d", func, -info); \ -} else if(info > 0) { \ - cleanup \ - THError(fmt, func, info, ##__VA_ARGS__); \ -} - -#include "generic/THLapack.h" -#include "THGenerateAllTypes.h" - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THLogAdd.c b/contrib/lua-torch/torch7/lib/TH/THLogAdd.c deleted file mode 100644 index 4b14f85402..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THLogAdd.c +++ /dev/null @@ -1,88 +0,0 @@ -#include "THLogAdd.h" - -#include - -#ifdef USE_DOUBLE -#define MINUS_LOG_THRESHOLD -39.14 -#else -#define MINUS_LOG_THRESHOLD -18.42 -#endif - -const double THLog2Pi=1.83787706640934548355; -const double THLogZero=-DBL_MAX; -const double THLogOne=0; - -double THLogAdd(double log_a, double log_b) -{ - double minusdif; - - if (log_a < log_b) - { - double tmp = log_a; - log_a = log_b; - log_b = tmp; - } - - minusdif = log_b - log_a; -#ifdef DEBUG - if (isnan(minusdif)) - THError("THLogAdd: minusdif (%f) log_b (%f) or log_a (%f) is nan", minusdif, log_b, log_a); -#endif - if (minusdif < MINUS_LOG_THRESHOLD) - return log_a; - else - return log_a + log1p(exp(minusdif)); -} - -double THLogSub(double log_a, double log_b) -{ - double minusdif; - - if (log_a < log_b) - THError("LogSub: log_a (%f) should be greater than log_b (%f)", log_a, log_b); - - minusdif = log_b - log_a; -#ifdef DEBUG - if (isnan(minusdif)) - THError("LogSub: minusdif (%f) log_b (%f) or log_a (%f) is nan", minusdif, log_b, log_a); -#endif - if (log_a == log_b) - return THLogZero; - else if (minusdif < MINUS_LOG_THRESHOLD) - return log_a; - else - return log_a + log1p(-exp(minusdif)); -} - -/* Credits to Leon Bottou */ -double THExpMinusApprox(const double x) -{ -#define EXACT_EXPONENTIAL 0 -#if EXACT_EXPONENTIAL - return exp(-x); -#else - /* fast approximation of exp(-x) for x positive */ -# define A0 (1.0) -# define A1 (0.125) -# define A2 (0.0078125) -# define A3 (0.00032552083) -# define A4 (1.0172526e-5) - if (x < 13.0) - { -/* assert(x>=0); */ - double y; - y = A0+x*(A1+x*(A2+x*(A3+x*A4))); - y *= y; - y *= y; - y *= y; - y = 1/y; - return y; - } - return 0; -# undef A0 -# undef A1 -# undef A2 -# undef A3 -# undef A4 -#endif -} diff --git a/contrib/lua-torch/torch7/lib/TH/THLogAdd.h b/contrib/lua-torch/torch7/lib/TH/THLogAdd.h deleted file mode 100644 index 9319b8f464..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THLogAdd.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef TH_LOG_ADD_INC -#define TH_LOG_ADD_INC - -#include "THGeneral.h" - -TH_API const double THLog2Pi; -TH_API const double THLogZero; -TH_API const double THLogOne; - -TH_API double THLogAdd(double log_a, double log_b); -TH_API double THLogSub(double log_a, double log_b); -TH_API double THExpMinusApprox(const double x); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THMath.h b/contrib/lua-torch/torch7/lib/TH/THMath.h deleted file mode 100644 index 004e4fe45d..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THMath.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef _THMATH_H -#define _THMATH_H - -static inline double TH_sigmoid(double value) { - return 1.0 / (1.0 + exp(-value)); -} - -static inline double TH_frac(double x) { - return x - trunc(x); -} - -static inline double TH_rsqrt(double x) { - return 1.0 / sqrt(x); -} - -static inline double TH_lerp(double a, double b, double weight) { - return a + weight * (b-a); -} - -static inline float TH_sigmoidf(float value) { - return 1.0f / (1.0f + expf(-value)); -} - -static inline float TH_fracf(float x) { - return x - 
truncf(x); -} - -static inline float TH_rsqrtf(float x) { - return 1.0f / sqrtf(x); -} - -static inline float TH_lerpf(float a, float b, float weight) { - return a + weight * (b-a); -} - -#endif // _THMATH_H diff --git a/contrib/lua-torch/torch7/lib/TH/THMemoryFile.c b/contrib/lua-torch/torch7/lib/TH/THMemoryFile.c deleted file mode 100644 index ecce6e1b1f..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THMemoryFile.c +++ /dev/null @@ -1,685 +0,0 @@ -#include "THMemoryFile.h" -#include "THFilePrivate.h" -#include "stdint.h" - -typedef struct THMemoryFile__ -{ - THFile file; - THCharStorage *storage; - size_t size; - size_t position; - int longSize; - -} THMemoryFile; - -static int THMemoryFile_isOpened(THFile *self) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - return (mfself->storage != NULL); -} - -static char *THMemoryFile_strnextspace(char *str_, char *c_) -{ - char c; - - while( (c = *str_) ) - { - if( (c != ' ') && (c != '\n') && (c != ':') && (c != ';') ) - break; - str_++; - } - - while( (c = *str_) ) - { - if( (c == ' ') || (c == '\n') || (c == ':') || (c == ';') ) - { - *c_ = c; - *str_ = '\0'; - return(str_); - } - str_++; - } - return NULL; -} - -static void THMemoryFile_grow(THMemoryFile *self, size_t size) -{ - size_t missingSpace; - - if(size <= self->size) - return; - else - { - if(size < self->storage->size) /* note the "<" and not "<=" */ - { - self->size = size; - self->storage->data[self->size] = '\0'; - return; - } - } - - missingSpace = size-self->storage->size+1; /* +1 for the '\0' */ - THCharStorage_resize(self->storage, (self->storage->size/2 > missingSpace ? - self->storage->size + (self->storage->size/2) - : self->storage->size + missingSpace)); -} - -static int THMemoryFile_mode(const char *mode, int *isReadable, int *isWritable) -{ - *isReadable = 0; - *isWritable = 0; - if(strlen(mode) == 1) - { - if(*mode == 'r') - { - *isReadable = 1; - return 1; - } - else if(*mode == 'w') - { - *isWritable = 1; - return 1; - } - } - else if(strlen(mode) == 2) - { - if(mode[0] == 'r' && mode[1] == 'w') - { - *isReadable = 1; - *isWritable = 1; - return 1; - } - } - return 0; -} - -/********************************************************/ - -#define READ_WRITE_METHODS(TYPE, TYPEC, ASCII_READ_ELEM, ASCII_WRITE_ELEM, INSIDE_SPACING) \ - static size_t THMemoryFile_read##TYPEC(THFile *self, TYPE *data, size_t n) \ - { \ - THMemoryFile *mfself = (THMemoryFile*)self; \ - size_t nread = 0; \ - \ - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); \ - THArgCheck(mfself->file.isReadable, 1, "attempt to read in a write-only file"); \ - \ - if (n == 0) \ - return 0; \ - \ - if(mfself->file.isBinary) \ - { \ - size_t nByte = sizeof(TYPE)*n; \ - size_t nByteRemaining = (mfself->position + nByte <= mfself->size ? 
nByte : mfself->size-mfself->position); \ - nread = nByteRemaining/sizeof(TYPE); \ - memmove(data, mfself->storage->data+mfself->position, nread*sizeof(TYPE)); \ - mfself->position += nread*sizeof(TYPE); \ - } \ - else \ - { \ - size_t i; \ - for(i = 0; i < n; i++) \ - { \ - size_t nByteRead = 0; \ - char spaceChar = 0; \ - char *spacePtr = THMemoryFile_strnextspace(mfself->storage->data+mfself->position, &spaceChar); \ - ASCII_READ_ELEM; \ - if(ret == EOF) \ - { \ - while(mfself->storage->data[mfself->position]) \ - mfself->position++; \ - } \ - else \ - mfself->position += nByteRead; \ - if(spacePtr) \ - *spacePtr = spaceChar; \ - } \ - if(mfself->file.isAutoSpacing && (n > 0)) \ - { \ - if( (mfself->position < mfself->size) && (mfself->storage->data[mfself->position] == '\n') ) \ - mfself->position++; \ - } \ - } \ - \ - if(nread != n) \ - { \ - mfself->file.hasError = 1; /* shouldn't we put hasError to 0 all the time ? */ \ - if(!mfself->file.isQuiet) \ - THError("read error: read %d blocks instead of %d", nread, n); \ - } \ - \ - return nread; \ - } \ - \ - static size_t THMemoryFile_write##TYPEC(THFile *self, TYPE *data, size_t n) \ - { \ - THMemoryFile *mfself = (THMemoryFile*)self; \ - \ - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); \ - THArgCheck(mfself->file.isWritable, 1, "attempt to write in a read-only file"); \ - \ - if (n == 0) \ - return 0; \ - \ - if(mfself->file.isBinary) \ - { \ - size_t nByte = sizeof(TYPE)*n; \ - THMemoryFile_grow(mfself, mfself->position+nByte); \ - memmove(mfself->storage->data+mfself->position, data, nByte); \ - mfself->position += nByte; \ - if(mfself->position > mfself->size) \ - { \ - mfself->size = mfself->position; \ - mfself->storage->data[mfself->size] = '\0'; \ - } \ - } \ - else \ - { \ - size_t i; \ - for(i = 0; i < n; i++) \ - { \ - ssize_t nByteWritten; \ - while (1) \ - { \ - ASCII_WRITE_ELEM; \ - if( (nByteWritten > -1) && (nByteWritten < mfself->storage->size-mfself->position) ) \ - { \ - mfself->position += nByteWritten; \ - break; \ - } \ - THMemoryFile_grow(mfself, mfself->storage->size + (mfself->storage->size/2) + 2); \ - } \ - if(mfself->file.isAutoSpacing) \ - { \ - if(i < n-1) \ - { \ - THMemoryFile_grow(mfself, mfself->position+1); \ - sprintf(mfself->storage->data+mfself->position, " "); \ - mfself->position++; \ - } \ - if(i == n-1) \ - { \ - THMemoryFile_grow(mfself, mfself->position+1); \ - sprintf(mfself->storage->data+mfself->position, "\n"); \ - mfself->position++; \ - } \ - } \ - } \ - if(mfself->position > mfself->size) \ - { \ - mfself->size = mfself->position; \ - mfself->storage->data[mfself->size] = '\0'; \ - } \ - } \ - \ - return n; \ - } - - -void THMemoryFile_longSize(THFile *self, int size) -{ - THMemoryFile *dfself = (THMemoryFile*)(self); - THArgCheck(size == 0 || size == 4 || size == 8, 1, "Invalid long size specified"); - dfself->longSize = size; -} - -THCharStorage *THMemoryFile_storage(THFile *self) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); - - THCharStorage_resize(mfself->storage, mfself->size+1); - - return mfself->storage; -} - -static void THMemoryFile_synchronize(THFile *self) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); -} - -static void THMemoryFile_seek(THFile *self, size_t position) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); - 
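THMemoryFile_grow above resizes geometrically: it adds half of the current capacity unless the request needs more, which keeps repeated small writes amortized O(1). A standalone sketch of just that resize arithmetic (illustrative names, not TH functions):

```c
#include <stddef.h>

/* mirror of the THMemoryFile_grow capacity calculation */
static size_t grown_capacity(size_t cap, size_t needed) {
  size_t missing = needed - cap + 1;           /* +1 for the trailing '\0' */
  return (cap / 2 > missing) ? cap + cap / 2   /* geometric step */
                             : cap + missing;  /* exact fit */
}
/* e.g. grown_capacity(64, 65)  -> 96   (64 + 64/2)
 *      grown_capacity(64, 200) -> 201  (exact fit plus '\0') */
```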
THArgCheck(position >= 0, 2, "position must be positive"); - - if(position <= mfself->size) - mfself->position = position; - else - { - mfself->file.hasError = 1; - if(!mfself->file.isQuiet) - THError("unable to seek at position %zu", position); - } -} - -static void THMemoryFile_seekEnd(THFile *self) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); - - mfself->position = mfself->size; -} - -static size_t THMemoryFile_position(THFile *self) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); - return mfself->position; -} - -static void THMemoryFile_close(THFile *self) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); - THCharStorage_free(mfself->storage); - mfself->storage = NULL; -} - -static void THMemoryFile_free(THFile *self) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - - if(mfself->storage) - THCharStorage_free(mfself->storage); - - THFree(mfself); -} - -/* READ_WRITE_METHODS(bool, Bool, */ -/* int value = 0; int ret = sscanf(mfself->storage->data+mfself->position, "%d%n", &value, &nByteRead); data[i] = (value ? 1 : 0), */ -/* int value = (data[i] ? 1 : 0); nByteWritten = snprintf(mfself->storage->data+mfself->position, mfself->storage->size-mfself->position, "%d", value), */ -/* 1) */ - -READ_WRITE_METHODS(unsigned char, Byte, - size_t ret = (mfself->position + n <= mfself->size ? n : mfself->size-mfself->position); \ - if(spacePtr) *spacePtr = spaceChar; \ - nByteRead = ret; \ - nread = ret; \ - i = n-1; \ - memmove(data, mfself->storage->data+mfself->position, nByteRead), - nByteWritten = (n < mfself->storage->size-mfself->position ? n : -1); \ - i = n-1; \ - if(nByteWritten > -1) - memmove(mfself->storage->data+mfself->position, data, nByteWritten), - 0) - -/* DEBUG: we should check if %n is count or not as a element (so ret might need to be ret-- on some systems) */ -/* Note that we do a trick for char */ -READ_WRITE_METHODS(char, Char, - size_t ret = (mfself->position + n <= mfself->size ? n : mfself->size-mfself->position); \ - if(spacePtr) *spacePtr = spaceChar; \ - nByteRead = ret; \ - nread = ret; \ - i = n-1; \ - memmove(data, mfself->storage->data+mfself->position, nByteRead), - nByteWritten = (n < mfself->storage->size-mfself->position ? 
n : -1); \ - i = n-1; \ - if(nByteWritten > -1) - memmove(mfself->storage->data+mfself->position, data, nByteWritten), - 0) - -READ_WRITE_METHODS(short, Short, - int nByteRead_; int ret = sscanf(mfself->storage->data+mfself->position, "%hd%n", &data[i], &nByteRead_); nByteRead = nByteRead_; if(ret <= 0) break; else nread++, - nByteWritten = snprintf(mfself->storage->data+mfself->position, mfself->storage->size-mfself->position, "%hd", data[i]), - 1) - -READ_WRITE_METHODS(int, Int, - int nByteRead_; int ret = sscanf(mfself->storage->data+mfself->position, "%d%n", &data[i], &nByteRead_); nByteRead = nByteRead_; if(ret <= 0) break; else nread++, - nByteWritten = snprintf(mfself->storage->data+mfself->position, mfself->storage->size-mfself->position, "%d", data[i]), - 1) - -READ_WRITE_METHODS(float, Float, - int nByteRead_; int ret = sscanf(mfself->storage->data+mfself->position, "%g%n", &data[i], &nByteRead_); nByteRead = nByteRead_; if(ret <= 0) break; else nread++, - nByteWritten = snprintf(mfself->storage->data+mfself->position, mfself->storage->size-mfself->position, "%.9g", data[i]), - 1) - -READ_WRITE_METHODS(THHalf, Half, - int nByteRead_; float buf; \ - int ret = sscanf(mfself->storage->data+mfself->position, "%g%n", &buf, &nByteRead_); \ - data[i] = TH_float2half(buf); nByteRead = nByteRead_; if(ret <= 0) break; else nread++, - nByteWritten = snprintf(mfself->storage->data+mfself->position, mfself->storage->size-mfself->position, "%.9g", TH_half2float(data[i])), - 1) - -READ_WRITE_METHODS(double, Double, - int nByteRead_; int ret = sscanf(mfself->storage->data+mfself->position, "%lg%n", &data[i], &nByteRead_); nByteRead = nByteRead_; if(ret <= 0) break; else nread++, - nByteWritten = snprintf(mfself->storage->data+mfself->position, mfself->storage->size-mfself->position, "%.17g", data[i]), - 1) - -int THDiskFile_isLittleEndianCPU(void); - -static size_t THMemoryFile_readLong(THFile *self, long *data, size_t n) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - size_t nread = 0L; - - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); - THArgCheck(mfself->file.isReadable, 1, "attempt to read in a write-only file"); - - if (n == 0) - return 0; - - if(mfself->file.isBinary) - { - if(mfself->longSize == 0 || mfself->longSize == sizeof(long)) - { - size_t nByte = sizeof(long)*n; - size_t nByteRemaining = (mfself->position + nByte <= mfself->size ? nByte : mfself->size-mfself->position); - nread = nByteRemaining/sizeof(long); - memmove(data, mfself->storage->data+mfself->position, nread*sizeof(long)); - mfself->position += nread*sizeof(long); - } else if(mfself->longSize == 4) - { - size_t nByte = 4*n; - size_t nByteRemaining = (mfself->position + nByte <= mfself->size ? nByte : mfself->size-mfself->position); - int32_t *storage = (int32_t *)(mfself->storage->data + mfself->position); - nread = nByteRemaining/4; - size_t i; - for(i = 0; i < nread; i++) - data[i] = storage[i]; - mfself->position += nread*4; - } - else /* if(mfself->longSize == 8) */ - { - int big_endian = !THDiskFile_isLittleEndianCPU(); - size_t nByte = 8*n; - int32_t *storage = (int32_t *)(mfself->storage->data + mfself->position); - size_t nByteRemaining = (mfself->position + nByte <= mfself->size ? 
nByte : mfself->size-mfself->position); - nread = nByteRemaining/8; - size_t i; - for(i = 0; i < nread; i++) - data[i] = storage[2*i + big_endian]; - mfself->position += nread*8; - } - } - else - { - size_t i; - for(i = 0; i < n; i++) - { - size_t nByteRead = 0; - char spaceChar = 0; - char *spacePtr = THMemoryFile_strnextspace(mfself->storage->data+mfself->position, &spaceChar); - int nByteRead_; int ret = sscanf(mfself->storage->data+mfself->position, "%ld%n", &data[i], &nByteRead_); nByteRead = nByteRead_; if(ret <= 0) break; else nread++; - if(ret == EOF) - { - while(mfself->storage->data[mfself->position]) - mfself->position++; - } - else - mfself->position += nByteRead; - if(spacePtr) - *spacePtr = spaceChar; - } - if(mfself->file.isAutoSpacing && (n > 0)) - { - if( (mfself->position < mfself->size) && (mfself->storage->data[mfself->position] == '\n') ) - mfself->position++; - } - } - - if(nread != n) - { - mfself->file.hasError = 1; /* shouldn't we put hasError to 0 all the time ? */ - if(!mfself->file.isQuiet) - THError("read error: read %d blocks instead of %d", nread, n); - } - - return nread; -} - -static size_t THMemoryFile_writeLong(THFile *self, long *data, size_t n) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); - THArgCheck(mfself->file.isWritable, 1, "attempt to write in a read-only file"); - - if (n == 0) - return 0; - - if(mfself->file.isBinary) - { - if(mfself->longSize == 0 || mfself->longSize == sizeof(long)) - { - size_t nByte = sizeof(long)*n; - THMemoryFile_grow(mfself, mfself->position+nByte); - memmove(mfself->storage->data+mfself->position, data, nByte); - mfself->position += nByte; - } else if(mfself->longSize == 4) - { - size_t nByte = 4*n; - THMemoryFile_grow(mfself, mfself->position+nByte); - int32_t *storage = (int32_t *)(mfself->storage->data + mfself->position); - size_t i; - for(i = 0; i < n; i++) - storage[i] = data[i]; - mfself->position += nByte; - } - else /* if(mfself->longSize == 8) */ - { - int big_endian = !THDiskFile_isLittleEndianCPU(); - size_t nByte = 8*n; - THMemoryFile_grow(mfself, mfself->position+nByte); - int32_t *storage = (int32_t *)(mfself->storage->data + mfself->position); - size_t i; - for(i = 0; i < n; i++) - { - storage[2*i + !big_endian] = 0; - storage[2*i + big_endian] = data[i]; - } - mfself->position += nByte; - } - if(mfself->position > mfself->size) - { - mfself->size = mfself->position; - mfself->storage->data[mfself->size] = '\0'; - } - } - else - { - size_t i; - for(i = 0; i < n; i++) - { - ssize_t nByteWritten; - while (1) - { - nByteWritten = snprintf(mfself->storage->data+mfself->position, mfself->storage->size-mfself->position, "%ld", data[i]); - if( (nByteWritten > -1) && (nByteWritten < mfself->storage->size-mfself->position) ) - { - mfself->position += nByteWritten; - break; - } - THMemoryFile_grow(mfself, mfself->storage->size + (mfself->storage->size/2) + 2); - } - if(mfself->file.isAutoSpacing) - { - if(i < n-1) - { - THMemoryFile_grow(mfself, mfself->position+1); - sprintf(mfself->storage->data+mfself->position, " "); - mfself->position++; - } - if(i == n-1) - { - THMemoryFile_grow(mfself, mfself->position+1); - sprintf(mfself->storage->data+mfself->position, "\n"); - mfself->position++; - } - } - } - if(mfself->position > mfself->size) - { - mfself->size = mfself->position; - mfself->storage->data[mfself->size] = '\0'; - } - } - - return n; -} - -static char* THMemoryFile_cloneString(const char *str, ptrdiff_t size) -{ - char *cstr 
= THAlloc(size); - memcpy(cstr, str, size); - return cstr; -} - -static size_t THMemoryFile_readString(THFile *self, const char *format, char **str_) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); - THArgCheck(mfself->file.isReadable, 1, "attempt to read in a write-only file"); - THArgCheck((strlen(format) >= 2 ? (format[0] == '*') && (format[1] == 'a' || format[1] == 'l') : 0), 2, "format must be '*a' or '*l'"); - - if(mfself->position == mfself->size) /* eof ? */ - { - mfself->file.hasError = 1; - if(!mfself->file.isQuiet) - THError("read error: read 0 blocks instead of 1"); - - *str_ = NULL; - return 0; - } - - if(format[1] == 'a') - { - size_t str_size = mfself->size-mfself->position; - - *str_ = THMemoryFile_cloneString(mfself->storage->data+mfself->position, str_size); - mfself->position = mfself->size; - - return str_size; - } - else - { - char *p = mfself->storage->data+mfself->position; - int eolFound = 0; - size_t posEol; - size_t i; - for(i = 0; i < mfself->size-mfself->position; i++) - { - if(p[i] == '\n') - { - posEol = i; - eolFound = 1; - break; - } - } - - if(eolFound) - { - *str_ = THMemoryFile_cloneString(mfself->storage->data+mfself->position, posEol); - mfself->position += posEol+1; - return posEol; - } - else /* well, we read all! */ - { - size_t str_size = mfself->size-mfself->position; - - *str_ = THMemoryFile_cloneString(mfself->storage->data+mfself->position, str_size); - mfself->position = mfself->size; - - return str_size; - } - } - - *str_ = NULL; - return 0; -} - -static size_t THMemoryFile_writeString(THFile *self, const char *str, size_t size) -{ - THMemoryFile *mfself = (THMemoryFile*)self; - - THArgCheck(mfself->storage != NULL, 1, "attempt to use a closed file"); - THArgCheck(mfself->file.isWritable, 1, "attempt to write in a read-only file"); - - THMemoryFile_grow(mfself, mfself->position+size); - memmove(mfself->storage->data+mfself->position, str, size); - mfself->position += size; - if(mfself->position > mfself->size) - { - mfself->size = mfself->position; - mfself->storage->data[mfself->size] = '\0'; - } - - return size; -} - -THFile *THMemoryFile_newWithStorage(THCharStorage *storage, const char *mode) -{ - static struct THFileVTable vtable = { - THMemoryFile_isOpened, - - THMemoryFile_readByte, - THMemoryFile_readChar, - THMemoryFile_readShort, - THMemoryFile_readInt, - THMemoryFile_readLong, - THMemoryFile_readFloat, - THMemoryFile_readDouble, - THMemoryFile_readHalf, - THMemoryFile_readString, - - THMemoryFile_writeByte, - THMemoryFile_writeChar, - THMemoryFile_writeShort, - THMemoryFile_writeInt, - THMemoryFile_writeLong, - THMemoryFile_writeFloat, - THMemoryFile_writeDouble, - THMemoryFile_writeHalf, - THMemoryFile_writeString, - - THMemoryFile_synchronize, - THMemoryFile_seek, - THMemoryFile_seekEnd, - THMemoryFile_position, - THMemoryFile_close, - THMemoryFile_free - }; - - THMemoryFile *mfself; - int isReadable; - int isWritable; - - if(storage) - { - THArgCheck(storage->data[storage->size-1] == '\0', 1, "provided CharStorage must be terminated by 0"); - THArgCheck(THMemoryFile_mode(mode, &isReadable, &isWritable), 2, "file mode should be 'r','w' or 'rw'"); - THCharStorage_retain(storage); - } - else - { - THArgCheck(THMemoryFile_mode(mode, &isReadable, &isWritable), 2, "file mode should be 'r','w' or 'rw'"); - storage = THCharStorage_newWithSize(1); - storage->data[0] = '\0'; - } - - mfself = THAlloc(sizeof(THMemoryFile)); - - mfself->storage = storage; - 
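The constructor being assembled here wires up TH's manual polymorphism: each file class fills a static THFileVTable of function pointers and embeds a THFile as its first member, so a THFile* can be cast back to the concrete type inside every method. A reduced sketch of the pattern (illustrative names, not the TH API):

```c
#include <stddef.h>

struct file;
struct file_vtable { void (*close)(struct file *self); };
struct file { const struct file_vtable *vtable; };  /* "base class" */

struct mem_file {
  struct file base;   /* first member, so casts work in both directions */
  size_t position;
};

static void mem_close(struct file *self) {
  struct mem_file *mf = (struct mem_file *)self;  /* downcast, as in TH */
  mf->position = 0;
}

static const struct file_vtable mem_file_vtable = { mem_close };
/* dispatch goes through the table: f->vtable->close(f); */
```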
mfself->size = (storage ? storage->size-1 : 0); - mfself->position = 0; - mfself->longSize = 0; - - mfself->file.vtable = &vtable; - mfself->file.isQuiet = 0; - mfself->file.isReadable = isReadable; - mfself->file.isWritable = isWritable; - mfself->file.isBinary = 0; - mfself->file.isAutoSpacing = 1; - mfself->file.hasError = 0; - - return (THFile*)mfself; -} - -THFile *THMemoryFile_new(const char *mode) -{ - return THMemoryFile_newWithStorage(NULL, mode); -} diff --git a/contrib/lua-torch/torch7/lib/TH/THMemoryFile.h b/contrib/lua-torch/torch7/lib/TH/THMemoryFile.h deleted file mode 100644 index b54cdcc2f2..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THMemoryFile.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef TH_MEMORY_FILE_INC -#define TH_MEMORY_FILE_INC - -#include "THFile.h" -#include "THStorage.h" - -TH_API THFile *THMemoryFile_newWithStorage(THCharStorage *storage, const char *mode); -TH_API THFile *THMemoryFile_new(const char *mode); - -TH_API THCharStorage *THMemoryFile_storage(THFile *self); -TH_API void THMemoryFile_longSize(THFile *self, int size); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THRandom.c b/contrib/lua-torch/torch7/lib/TH/THRandom.c deleted file mode 100644 index 86d721e7b6..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THRandom.c +++ /dev/null @@ -1,272 +0,0 @@ -#include "THGeneral.h" -#include "THRandom.h" - -/* Code for the Mersenne Twister random generator.... */ -#define n _MERSENNE_STATE_N -#define m _MERSENNE_STATE_M - -/* Creates (unseeded) new generator*/ -static THGenerator* THGenerator_newUnseeded(void) -{ - THGenerator *self = THAlloc(sizeof(THGenerator)); - memset(self, 0, sizeof(THGenerator)); - self->left = 1; - self->seeded = 0; - self->normal_is_valid = 0; - return self; -} - -/* Creates new generator and makes sure it is seeded*/ -THGenerator* THGenerator_new(void) -{ - THGenerator *self = THGenerator_newUnseeded(); - THRandom_seed(self); - return self; -} - -THGenerator* THGenerator_copy(THGenerator *self, THGenerator *from) -{ - memcpy(self, from, sizeof(THGenerator)); - return self; -} - -void THGenerator_free(THGenerator *self) -{ - THFree(self); -} - -int THGenerator_isValid(THGenerator *_generator) -{ - if ((_generator->seeded == 1) && - (_generator->left > 0 && _generator->left <= n) && (_generator->next <= n)) - return 1; - - return 0; -} - -#ifndef _WIN32 -#include -#include -#include -#include - -static unsigned long readURandomLong() -{ - int randDev = open("/dev/urandom", O_RDONLY); - unsigned long randValue; - if (randDev < 0) { - THError("Unable to open /dev/urandom"); - } - ssize_t readBytes = read(randDev, &randValue, sizeof(randValue)); - if (readBytes < sizeof(randValue)) { - THError("Unable to read from /dev/urandom"); - } - close(randDev); - return randValue; -} -#endif // _WIN32 - -unsigned long THRandom_seed(THGenerator *_generator) -{ -#ifdef _WIN32 - unsigned long s = (unsigned long)time(0); -#else - unsigned long s = readURandomLong(); -#endif - THRandom_manualSeed(_generator, s); - return s; -} - -/* The next 4 methods are taken from http:www.math.keio.ac.jpmatumotoemt.html - Here is the copyright: - Some minor modifications have been made to adapt to "my" C... */ - -/* - A C-program for MT19937, with initialization improved 2002/2/10. - Coded by Takuji Nishimura and Makoto Matsumoto. - This is a faster version by taking Shawn Cokus's optimization, - Matthe Bellew's simplification, Isaku Wada's double version. 
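On POSIX systems THRandom_seed above pulls its seed from /dev/urandom. A standalone sketch of that helper, assuming a variant that also closes the descriptor on the short-read path and reports failure through the return value instead of THError (illustrative name, not part of TH):

```c
#include <fcntl.h>
#include <unistd.h>

static int read_urandom_seed(unsigned long *out) {
  int fd = open("/dev/urandom", O_RDONLY);
  if (fd < 0)
    return -1;                                 /* no entropy device */
  ssize_t n = read(fd, out, sizeof *out);
  close(fd);
  return (n == (ssize_t)sizeof *out) ? 0 : -1; /* short read is a failure */
}
```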
- - Before using, initialize the state by using init_genrand(seed) - or init_by_array(init_key, key_length). - - Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. The names of its contributors may not be used to endorse or promote - products derived from this software without specific prior written - permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - Any feedback is very welcome. - http://www.math.keio.ac.jp/matumoto/emt.html - email: matumoto@math.keio.ac.jp -*/ - -/* Macros for the Mersenne Twister random generator... */ -/* Period parameters */ -/* #define n 624 */ -/* #define m 397 */ -#define MATRIX_A 0x9908b0dfUL /* constant vector a */ -#define UMASK 0x80000000UL /* most significant w-r bits */ -#define LMASK 0x7fffffffUL /* least significant r bits */ -#define MIXBITS(u,v) ( ((u) & UMASK) | ((v) & LMASK) ) -#define TWIST(u,v) ((MIXBITS(u,v) >> 1) ^ ((v)&1UL ? MATRIX_A : 0UL)) -/*********************************************************** That's it. */ - -void THRandom_manualSeed(THGenerator *_generator, unsigned long the_seed_) -{ - int j; - - /* This ensures reseeding resets all of the state (i.e. state for Gaussian numbers) */ - THGenerator *blank = THGenerator_newUnseeded(); - THGenerator_copy(_generator, blank); - THGenerator_free(blank); - - _generator->the_initial_seed = the_seed_; - _generator->state[0] = _generator->the_initial_seed & 0xffffffffUL; - for(j = 1; j < n; j++) - { - _generator->state[j] = (1812433253UL * (_generator->state[j-1] ^ (_generator->state[j-1] >> 30)) + j); - /* See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier. */ - /* In the previous versions, mSBs of the seed affect */ - /* only mSBs of the array state[]. 
*/ - /* 2002/01/09 modified by makoto matsumoto */ - _generator->state[j] &= 0xffffffffUL; /* for >32 bit machines */ - } - _generator->left = 1; - _generator->seeded = 1; -} - -unsigned long THRandom_initialSeed(THGenerator *_generator) -{ - return _generator->the_initial_seed; -} - -void THRandom_nextState(THGenerator *_generator) -{ - unsigned long *p = _generator->state; - int j; - - _generator->left = n; - _generator->next = 0; - - for(j = n-m+1; --j; p++) - *p = p[m] ^ TWIST(p[0], p[1]); - - for(j = m; --j; p++) - *p = p[m-n] ^ TWIST(p[0], p[1]); - - *p = p[m-n] ^ TWIST(p[0], _generator->state[0]); -} - -unsigned long THRandom_random(THGenerator *_generator) -{ - unsigned long y; - - if (--(_generator->left) == 0) - THRandom_nextState(_generator); - y = *(_generator->state + (_generator->next)++); - - /* Tempering */ - y ^= (y >> 11); - y ^= (y << 7) & 0x9d2c5680UL; - y ^= (y << 15) & 0xefc60000UL; - y ^= (y >> 18); - - return y; -} - -/* generates a random number on [0,1)-double-interval */ -static double __uniform__(THGenerator *_generator) -{ - /* divided by 2^32 */ - return (double)THRandom_random(_generator) * (1.0/4294967296.0); -} - -/********************************************************* - - Thanks *a lot* Takuji Nishimura and Makoto Matsumoto! - - Now my own code... - -*********************************************************/ - -double THRandom_uniform(THGenerator *_generator, double a, double b) -{ - return(__uniform__(_generator) * (b - a) + a); -} - -double THRandom_normal(THGenerator *_generator, double mean, double stdv) -{ - THArgCheck(stdv > 0, 2, "standard deviation must be strictly positive"); - - /* This is known as the Box-Muller method */ - if(!_generator->normal_is_valid) - { - _generator->normal_x = __uniform__(_generator); - _generator->normal_y = __uniform__(_generator); - _generator->normal_rho = sqrt(-2. * log(1.0-_generator->normal_y)); - _generator->normal_is_valid = 1; - } - else - _generator->normal_is_valid = 0; - - if(_generator->normal_is_valid) - return _generator->normal_rho*cos(2.*M_PI*_generator->normal_x)*stdv+mean; - else - return _generator->normal_rho*sin(2.*M_PI*_generator->normal_x)*stdv+mean; -} - -double THRandom_exponential(THGenerator *_generator, double lambda) -{ - return(-1. / lambda * log(1-__uniform__(_generator))); -} - -double THRandom_cauchy(THGenerator *_generator, double median, double sigma) -{ - return(median + sigma * tan(M_PI*(__uniform__(_generator)-0.5))); -} - -/* Faut etre malade pour utiliser ca. - M'enfin. 
*/ -double THRandom_logNormal(THGenerator *_generator, double mean, double stdv) -{ - THArgCheck(stdv > 0, 2, "standard deviation must be strictly positive"); - return(exp(THRandom_normal(_generator, mean, stdv))); -} - -int THRandom_geometric(THGenerator *_generator, double p) -{ - THArgCheck(p > 0 && p < 1, 1, "must be > 0 and < 1"); - return((int)(log(1-__uniform__(_generator)) / log(p)) + 1); -} - -int THRandom_bernoulli(THGenerator *_generator, double p) -{ - THArgCheck(p >= 0 && p <= 1, 1, "must be >= 0 and <= 1"); - return(__uniform__(_generator) <= p); -} diff --git a/contrib/lua-torch/torch7/lib/TH/THRandom.h b/contrib/lua-torch/torch7/lib/TH/THRandom.h deleted file mode 100644 index 28a14c0d7f..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THRandom.h +++ /dev/null @@ -1,81 +0,0 @@ -#ifndef TH_RANDOM_INC -#define TH_RANDOM_INC - -#include "THGeneral.h" - -#define _MERSENNE_STATE_N 624 -#define _MERSENNE_STATE_M 397 -/* A THGenerator contains all the state required for a single random number stream */ -typedef struct THGenerator { - /* The initial seed. */ - unsigned long the_initial_seed; - int left; /* = 1; */ - int seeded; /* = 0; */ - unsigned long next; - unsigned long state[_MERSENNE_STATE_N]; /* the array for the state vector */ - /********************************/ - - /* For normal distribution */ - double normal_x; - double normal_y; - double normal_rho; - int normal_is_valid; /* = 0; */ -} THGenerator; - -#define torch_Generator "torch.Generator" - -/* Manipulate THGenerator objects */ -TH_API THGenerator * THGenerator_new(void); -TH_API THGenerator * THGenerator_copy(THGenerator *self, THGenerator *from); -TH_API void THGenerator_free(THGenerator *gen); - -/* Checks if given generator is valid */ -TH_API int THGenerator_isValid(THGenerator *_generator); - -/* Initializes the random number generator from /dev/urandom (or on Windows -platforms with the current time (granularity: seconds)) and returns the seed. */ -TH_API unsigned long THRandom_seed(THGenerator *_generator); - -/* Initializes the random number generator with the given long "the_seed_". */ -TH_API void THRandom_manualSeed(THGenerator *_generator, unsigned long the_seed_); - -/* Returns the starting seed used. */ -TH_API unsigned long THRandom_initialSeed(THGenerator *_generator); - -/* Generates a uniform 32 bits integer. */ -TH_API unsigned long THRandom_random(THGenerator *_generator); - -/* Generates a uniform random number on [0,1[. */ -TH_API double THRandom_uniform(THGenerator *_generator, double a, double b); - -/** Generates a random number from a normal distribution. - (With mean #mean# and standard deviation #stdv >= 0#). -*/ -TH_API double THRandom_normal(THGenerator *_generator, double mean, double stdv); - -/** Generates a random number from an exponential distribution. - The density is $p(x) = lambda * exp(-lambda * x)$, where - lambda is a positive number. -*/ -TH_API double THRandom_exponential(THGenerator *_generator, double lambda); - -/** Returns a random number from a Cauchy distribution. - The Cauchy density is $p(x) = sigma/(pi*(sigma^2 + (x-median)^2))$ -*/ -TH_API double THRandom_cauchy(THGenerator *_generator, double median, double sigma); - -/** Generates a random number from a log-normal distribution. - (#mean > 0# is the mean of the log-normal distribution - and #stdv# is its standard deviation). -*/ -TH_API double THRandom_logNormal(THGenerator *_generator, double mean, double stdv); - -/** Generates a random number from a geometric distribution. 
- It returns an integer #i#, where $p(i) = (1-p) * p^(i-1)$. - p must satisfy $0 < p < 1$. -*/ -TH_API int THRandom_geometric(THGenerator *_generator, double p); - -/* Returns true with probability $p$ and false with probability $1-p$ (p > 0). */ -TH_API int THRandom_bernoulli(THGenerator *_generator, double p); -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THSize.c b/contrib/lua-torch/torch7/lib/TH/THSize.c deleted file mode 100644 index ccf1f61ddf..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THSize.c +++ /dev/null @@ -1,26 +0,0 @@ -#include "THSize.h" - -int THSize_isSameSizeAs(const long *sizeA, long dimsA, const long *sizeB, long dimsB) { - int d; - if (dimsA != dimsB) - return 0; - for(d = 0; d < dimsA; ++d) - { - if(sizeA[d] != sizeB[d]) - return 0; - } - return 1; -} - -ptrdiff_t THSize_nElement(long dims, long *size) { - if(dims == 0) - return 0; - else - { - ptrdiff_t nElement = 1; - int d; - for(d = 0; d < dims; d++) - nElement *= size[d]; - return nElement; - } -} diff --git a/contrib/lua-torch/torch7/lib/TH/THSize.h b/contrib/lua-torch/torch7/lib/TH/THSize.h deleted file mode 100644 index 3d39696f6b..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THSize.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef TH_SIZE_INC -#define TH_SIZE_INC - -#include "THGeneral.h" -#include - -// THTensor functions that would work on a THSize if we had such a class in C++, -// i.e. THTensor functions that depend only on the shape of the tensor, not the type. - -TH_API int THSize_isSameSizeAs(const long *sizeA, long dimsA, const long *sizeB, long dimsB); -TH_API ptrdiff_t THSize_nElement(long dims, long *size); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THStorage.c b/contrib/lua-torch/torch7/lib/TH/THStorage.c deleted file mode 100644 index f6b63f4a8e..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THStorage.c +++ /dev/null @@ -1,153 +0,0 @@ -#include "THAtomic.h" -#include "THStorage.h" - -#include "generic/THStorage.c" -#include "THGenerateAllTypes.h" - -#include "generic/THStorage.c" -#include "THGenerateHalfType.h" - -#include "generic/THStorageCopy.c" -#include "THGenerateAllTypes.h" - -#include "generic/THStorageCopy.c" -#include "THGenerateHalfType.h" - - -THDescBuff THLongStorage_sizeDesc(const THLongStorage *size) { - return _THSizeDesc(size->data, size->size); -} - -THLongStorage *THLongStorage_newInferSize(THLongStorage *size, ptrdiff_t nElement) -{ - ptrdiff_t total_size = (size->size > 0 ? 
1 : 0); - ptrdiff_t dim_infer = -1; - ptrdiff_t i; - for (i = 0; i < size->size; i++) { - if (size->data[i] == -1) { - THArgCheck(dim_infer == -1, 1, "only one dimension can be inferred"); - dim_infer = i; - } else { - total_size *= size->data[i]; - } - } - if (dim_infer != -1) { - THDescBuff buf = THLongStorage_sizeDesc(size); - THArgCheck(total_size > 0 && nElement % total_size == 0, 2, - "size '%s' is invalid for input of with %td elements", buf.str, nElement); - } else { - THDescBuff buf = THLongStorage_sizeDesc(size); - THArgCheck(nElement == total_size, 2, - "size '%s' is invalid for input of with %td elements", buf.str, nElement); - } - THLongStorage* copy = THLongStorage_newWithSize(size->size); - THLongStorage_copy(copy, size); - if (dim_infer != -1) { - copy->data[dim_infer] = nElement / total_size; - } - return copy; -} - -int THLongStorage_inferSize2(THLongStorage *output, long *sizesA, long dimsA, long *sizesB, long dimsB, - char *error_buffer, int buffer_len) { - THArgCheck(sizesA != NULL, 1, "sizesA must not be null"); - THArgCheck(sizesB != NULL, 2, "sizesB must not be null"); - THArgCheck(dimsA, 1, "Can't expand empty tensor a"); - THArgCheck(dimsB, 1, "Can't expand empty tensor b"); - ptrdiff_t ndim = dimsA > dimsB ? dimsA : dimsB; - - long *expandedSizes = THAlloc(sizeof(long)*ndim); - - for (long i = ndim - 1; i >= 0; --i) { - long offset = ndim - 1 - i; - long dimA = dimsA - 1 - offset; - long dimB = dimsB - 1 - offset; - long sizeA = (dimA >= 0) ? sizesA[dimA] : 1; - long sizeB = (dimB >= 0) ? sizesB[dimB] : 1; - if (sizeA == sizeB || sizeA == 1 || sizeB == 1) { - expandedSizes[i] = THMax(sizeA, sizeB); - } else { - THFree(expandedSizes); - snprintf(error_buffer, buffer_len, "The size of tensor a (%ld) must match the size of tensor b (%ld) at " - "non-singleton dimension %ld.", sizeA, sizeB, i); - return -1; - } - } - THLongStorage_resize(output, ndim); - memcpy(THLongStorage_data(output), expandedSizes, sizeof(long)*ndim); - THFree(expandedSizes); - return 0; -} - -int THLongStorage_inferSizeN(THLongStorage *output, int n, long **sizes, long *dims, - char *error_buffer, int buffer_len) { - THArgCheck(n > 0, 2, "n must be greater than 0"); - THArgCheck(sizes != NULL, 1, "sizes must not be null"); - THArgCheck(dims != NULL, 1, "dims must not be null"); - - ptrdiff_t ndim = 0; - for (int j = 0; j < n; ++j) { - THArgCheck(sizes[ j ] != NULL, 1, "size %d must not be null", j); - THArgCheck(dims[ j ], 1, "Can't expand empty tensor %d", j); - ndim = dims[ j ] > ndim ? dims[ j ] : ndim; - } - - long *expandedSizes = THAlloc(sizeof(long)*ndim); - - for (long i = ndim - 1; i >= 0; --i) { - expandedSizes[ i ] = 1; - long offset = ndim - 1 - i; - for (int j = 0; j < n; ++j) { - long dim = dims[ j ] - 1 - offset; - long size = (dim >= 0) ? 
sizes[ j ][ dim ] : 1; - if (size == expandedSizes[ i ] || size == 1 || expandedSizes[ i ] == 1) { - expandedSizes[ i ] = THMax(expandedSizes[ i ], size); - } else { - THFree(expandedSizes); - snprintf(error_buffer, buffer_len, "The size of tensor %i (%ld) must match the expanded size" - "of tensor (%ld) at non-singleton dimension %ld.", j, size, expandedSizes[ i ], i); - return -1; - } - } - } - THLongStorage_resize(output, ndim); - memcpy(THLongStorage_data(output), expandedSizes, sizeof(long)*ndim); - THFree(expandedSizes); - return 0; -} - -int THLongStorage_inferExpandGeometry(long *tensorSizes, long *tensorStrides, long tensorDim, - THLongStorage *sizes, long **expandedSizes, long **expandedStrides, - char *error_buffer, int buffer_len) { - ptrdiff_t ndim = THLongStorage_size(sizes); - - long *expandedSizesCalc = THAlloc(sizeof(long)*ndim); - long *expandedStridesCalc = THAlloc(sizeof(long)*ndim); - - // create a new geometry for the tensors - for (long i = ndim - 1; i >= 0; --i) { - long offset = ndim - 1 - i; - long dim = tensorDim - 1 - offset; - long size = (dim >= 0) ? tensorSizes[dim] : 1; - long stride = (dim >= 0) ? - tensorStrides[dim] : expandedSizesCalc[i + 1] * expandedStridesCalc[i+1]; - long targetSize = THLongStorage_data(sizes)[i]; - if (size != targetSize) { - if (size == 1) { - size = targetSize; - stride = 0; - } else { - THFree(expandedSizesCalc); - THFree(expandedStridesCalc); - snprintf(error_buffer, buffer_len, "The expanded size of the tensor (%ld) must match the existing size (%ld) at " - "non-singleton dimension %ld.", targetSize, size, i); - return -1; - } - } - expandedSizesCalc[i] = size; - expandedStridesCalc[i] = stride; - } - *expandedSizes = expandedSizesCalc; - *expandedStrides = expandedStridesCalc; - return 0; -} diff --git a/contrib/lua-torch/torch7/lib/TH/THStorage.h b/contrib/lua-torch/torch7/lib/TH/THStorage.h deleted file mode 100644 index fb7946bd98..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THStorage.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef TH_STORAGE_INC -#define TH_STORAGE_INC - -#include "THGeneral.h" -#include "THAllocator.h" - -#define THStorage TH_CONCAT_3(TH,Real,Storage) -#define THStorage_(NAME) TH_CONCAT_4(TH,Real,Storage_,NAME) - -/* fast access methods */ -#define TH_STORAGE_GET(storage, idx) ((storage)->data[(idx)]) -#define TH_STORAGE_SET(storage, idx, value) ((storage)->data[(idx)] = (value)) - -#include "generic/THStorage.h" -#include "THGenerateAllTypes.h" - -#include "generic/THStorage.h" -#include "THGenerateHalfType.h" - -#include "generic/THStorageCopy.h" -#include "THGenerateAllTypes.h" - -#include "generic/THStorageCopy.h" -#include "THGenerateHalfType.h" - -TH_API THDescBuff THLongStorage_sizeDesc(const THLongStorage *size); -TH_API THLongStorage *THLongStorage_newInferSize(THLongStorage *size, ptrdiff_t nElement); - -// Given the sizes of {2,N} tensors, write out the size when the tensors are expanded together. 
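THLongStorage_inferSize2 and inferSizeN above implement the usual broadcasting rule: align shapes from the right, treat missing dimensions as size 1, and require each aligned pair to be equal or contain a 1. A compact illustrative reimplementation of the two-tensor case (hypothetical names, error reporting reduced to a return code):

```c
/* broadcast two shapes into out[max(na,nb)]; returns -1 if incompatible */
static int broadcast2(const long *a, long na, const long *b, long nb,
                      long *out) {
  long ndim = na > nb ? na : nb;
  for (long i = ndim - 1; i >= 0; --i) {
    long off = ndim - 1 - i;
    long sa = (na - 1 - off >= 0) ? a[na - 1 - off] : 1;  /* missing dim -> 1 */
    long sb = (nb - 1 - off >= 0) ? b[nb - 1 - off] : 1;
    if (sa != sb && sa != 1 && sb != 1)
      return -1;                        /* non-singleton mismatch */
    out[i] = sa > sb ? sa : sb;
  }
  return 0;
}
/* e.g. {8,1,6,1} and {7,1,5} broadcast to {8,7,6,5} */
```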
-TH_API int THLongStorage_inferSize2(THLongStorage *output, long *sizesA, long dimsA, - long *sizesB, long dimsB, char *error_buffer, int buffer_len); -TH_API int THLongStorage_inferSizeN(THLongStorage *output, int n, long **sizes, long *dims, - char *error_buffer, int buffer_len); - -TH_API int THLongStorage_inferExpandGeometry(long *tensorSizes, long *tensorStrides, long tensorDim, - THLongStorage *sizes, long **expandedSizes, long **expandedStrides, - char *error_buffer, int buffer_len); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THTensor.c b/contrib/lua-torch/torch7/lib/TH/THTensor.c deleted file mode 100644 index 115e396a1c..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THTensor.c +++ /dev/null @@ -1,34 +0,0 @@ -#include "THAtomic.h" -#include "THTensor.h" -#include "THVector.h" -#include "generic/simd/simd.h" - -#include "THBlas.h" -#include "THLapack.h" -#include "THRandom.h" -#include "THTensorDimApply.h" -#include "THMath.h" - -#include "generic/THTensor.c" -#include "THGenerateAllTypes.h" - -#include "generic/THTensor.c" -#include "THGenerateHalfType.h" - -#include "generic/THTensorCopy.c" -#include "THGenerateAllTypes.h" - -#include "generic/THTensorCopy.c" -#include "THGenerateHalfType.h" - -#include "generic/THTensorRandom.c" -#include "THGenerateAllTypes.h" - -#include "generic/THTensorMath.c" -#include "THGenerateAllTypes.h" - -#include "generic/THTensorConv.c" -#include "THGenerateAllTypes.h" - -#include "generic/THTensorLapack.c" -#include "THGenerateFloatTypes.h" diff --git a/contrib/lua-torch/torch7/lib/TH/THTensor.h b/contrib/lua-torch/torch7/lib/TH/THTensor.h deleted file mode 100644 index d2a1c57e84..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THTensor.h +++ /dev/null @@ -1,42 +0,0 @@ -#ifndef TH_TENSOR_INC -#define TH_TENSOR_INC - -#include "THStorage.h" -#include "THTensorApply.h" - -#define THTensor TH_CONCAT_3(TH,Real,Tensor) -#define THTensor_(NAME) TH_CONCAT_4(TH,Real,Tensor_,NAME) - -/* basics */ -#include "generic/THTensor.h" -#include "THGenerateAllTypes.h" - -#include "generic/THTensor.h" -#include "THGenerateHalfType.h" - -#include "generic/THTensorCopy.h" -#include "THGenerateAllTypes.h" - -#include "generic/THTensorCopy.h" -#include "THGenerateHalfType.h" - -#include "THTensorMacros.h" - -/* random numbers */ -#include "THRandom.h" -#include "generic/THTensorRandom.h" -#include "THGenerateAllTypes.h" - -/* maths */ -#include "generic/THTensorMath.h" -#include "THGenerateAllTypes.h" - -/* convolutions */ -#include "generic/THTensorConv.h" -#include "THGenerateAllTypes.h" - -/* lapack support */ -#include "generic/THTensorLapack.h" -#include "THGenerateFloatTypes.h" - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THTensorApply.h b/contrib/lua-torch/torch7/lib/TH/THTensorApply.h deleted file mode 100644 index 7f48da47ec..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THTensorApply.h +++ /dev/null @@ -1,238 +0,0 @@ -#ifndef TH_TENSOR_APPLY_INC -#define TH_TENSOR_APPLY_INC - -/* - * The basic strategy for apply is as follows: - * - * 1. Starting with the outermost index, loop until we reach a dimension where the - * data is no longer contiguous, i.e. the stride at that dimension is not equal to - * the size of the tensor defined by the outer dimensions. Let's call this outer - * (contiguous) tensor A. Note that if the Tensor is contiguous, then A is equal - * to the entire Tensor. Let's call the inner tensor B. - * - * 2. We loop through the indices in B, starting at its outermost dimension. 
For - * example, if B is a 2x2 matrix, then we do: - * - * B[0][0] - * B[0][1] - * B[1][0] - * B[1][1] - * - * We set the offset into the underlying storage as (storageOffset + stride_B * index_B), - * i.e. basically we compute the offset into the storage as we would normally for a - * Tensor. But because we are guaranteed the subsequent data is contiguous in memory, we - * can simply loop for sizeof(A) iterations and perform the operation, without having to - * follow the order described by the strides of A. - * - * 3. As an optimization, we merge dimensions of A that are contiguous in memory. For - * example, if A is a 3x3x3x3 tensor narrowed from a 3x3x4x3 tensor, then the first two - * dimensions can be merged for the purposes of APPLY, reducing the number of nested - * loops. - */ - -#define __TH_TENSOR_APPLYX_PREAMBLE(TYPE, TENSOR, DIM, ALLOW_CONTIGUOUS) \ - TYPE *TENSOR##_data = NULL; \ - long *TENSOR##_counter = NULL, *TENSOR##_sizes = NULL, *TENSOR##_strides = NULL, *TENSOR##_dimOffset = NULL; \ - long TENSOR##_stride = 0, TENSOR##_size = 0, TENSOR##_dim = 0, TENSOR##_i, TENSOR##_n; \ - int TENSOR##_contiguous = ALLOW_CONTIGUOUS && DIM < 0; \ - TENSOR##_n = (TENSOR->nDimension ? 1 : 0); \ - for(TENSOR##_i = 0; TENSOR##_i < TENSOR->nDimension; TENSOR##_i++) \ - TENSOR##_n *= TENSOR->size[TENSOR##_i]; \ -\ - if(TENSOR->nDimension == 0) \ - TH_TENSOR_APPLY_hasFinished = 1; \ - else \ - { \ - TENSOR##_data = TENSOR->storage->data+TENSOR->storageOffset; \ - TENSOR##_size = 1; \ - TENSOR##_stride = 1; \ - for(TENSOR##_i = TENSOR->nDimension-1; TENSOR##_i >= 0; TENSOR##_i--) { \ - if(TENSOR->size[TENSOR##_i] != 1) { \ - if(TENSOR->stride[TENSOR##_i] == TENSOR##_size && TENSOR##_i != DIM) \ - TENSOR##_size *= TENSOR->size[TENSOR##_i]; \ - else{ \ - TENSOR##_contiguous = 0; \ - break; \ - } \ - } \ - } \ - if (!TENSOR##_contiguous) { \ - /* Find the dimension of contiguous sections */ \ - TENSOR##_dim = 1; \ - for(TENSOR##_i = TENSOR->nDimension-2; TENSOR##_i >= 0; TENSOR##_i--) \ - { \ - if(TENSOR->stride[TENSOR##_i] != TENSOR->stride[TENSOR##_i+1] * TENSOR->size[TENSOR##_i+1] || TENSOR##_i == DIM || TENSOR##_i+1 == DIM) \ - TENSOR##_dim++; \ - } \ - /* Allocate an array of 3*dim elements, where dim is the number of contiguous sections */ \ - TENSOR##_counter = (long*)THAlloc(sizeof(long)*(3*TENSOR##_dim)); \ - TENSOR##_sizes = TENSOR##_counter + TENSOR##_dim; \ - TENSOR##_strides = TENSOR##_counter + 2*TENSOR##_dim; \ - TH_TENSOR_dim_index = TENSOR##_dim-1; \ - TENSOR##_dimOffset = (DIM == TENSOR->nDimension-1) ? &TENSOR##_i : &TENSOR##_counter[DIM]; \ - TENSOR##_sizes[TH_TENSOR_dim_index] = TENSOR->size[TENSOR->nDimension-1]; \ - TENSOR##_strides[TH_TENSOR_dim_index] = TENSOR->stride[TENSOR->nDimension-1]; \ - /* TENSOR##_counter tracks where we are in the storage. The offset into the */ \ - /* storage is given by storage_offset + (i * j), where i is the stride */ \ - /* vector and j is tensor_counter vector. This sets the starting position for the loop. 
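 */ \
- /* Equivalently, the running offset is storageOffset plus the sum over k of */ \
- /* counter[k]*strides[k]; the loops below maintain that sum incrementally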
*/ \
- for(TENSOR##_i = TENSOR##_dim-1; TENSOR##_i >= 0; --TENSOR##_i) { \
- TENSOR##_counter[TENSOR##_i] = 0; \
- } \
- for(TENSOR##_i = TENSOR->nDimension-2; TENSOR##_i >= 0; --TENSOR##_i) { \
- if (TENSOR->stride[TENSOR##_i] == TENSOR->stride[TENSOR##_i+1] * TENSOR->size[TENSOR##_i+1] && TENSOR##_i != DIM && TENSOR##_i+1 != DIM) { \
- TENSOR##_sizes[TH_TENSOR_dim_index] = TENSOR->size[TENSOR##_i] * TENSOR##_sizes[TH_TENSOR_dim_index]; \
- if (DIM != TENSOR->nDimension-1 && TENSOR##_i < DIM) \
- TENSOR##_dimOffset--; \
- } else { \
- --TH_TENSOR_dim_index; \
- TENSOR##_sizes[TH_TENSOR_dim_index] = TENSOR->size[TENSOR##_i]; \
- TENSOR##_strides[TH_TENSOR_dim_index] = TENSOR->stride[TENSOR##_i]; \
- } \
- } \
- /* Size of the innermost section */ \
- TENSOR##_size = TENSOR##_sizes[TENSOR##_dim-1]; \
- /* Stride of the innermost section */ \
- TENSOR##_stride = TENSOR##_strides[TENSOR##_dim-1]; \
- } \
- } \
- TENSOR##_i = 0;
-
-#define __TH_TENSOR_APPLYX_UPDATE_COUNTERS(TENSOR, ALWAYS_UPDATE) \
- if(TENSOR##_i == TENSOR##_size || ALWAYS_UPDATE) \
- { \
- if(TENSOR##_contiguous) \
- break; \
-\
- if(TENSOR##_dim == 1) \
- break; \
-\
- /* Reset pointer to beginning of loop */ \
- TENSOR##_data -= TENSOR##_size*TENSOR##_stride; \
- for(TENSOR##_i = TENSOR##_dim-2; TENSOR##_i >= 0; TENSOR##_i--) \
- { \
- TENSOR##_counter[TENSOR##_i]++; \
- /* Jump ahead by the stride of this dimension */ \
- TENSOR##_data += TENSOR##_strides[TENSOR##_i]; \
-\
- if(TENSOR##_counter[TENSOR##_i] == TENSOR##_sizes[TENSOR##_i]) \
- { \
- if(TENSOR##_i == 0) \
- { \
- TH_TENSOR_APPLY_hasFinished = 1; \
- break; \
- } \
- else \
- { \
- /* Reset the pointer to the beginning of the chunk defined by this dimension */ \
- TENSOR##_data -= TENSOR##_counter[TENSOR##_i]*TENSOR##_strides[TENSOR##_i]; \
- TENSOR##_counter[TENSOR##_i] = 0; \
- } \
- } \
- else \
- break; \
- } \
- TENSOR##_i = 0; \
- } \
-
-#define TH_TENSOR_APPLY3_D(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, DIM, CODE) \
-{ \
- int TH_TENSOR_APPLY_hasFinished = 0; \
- long TH_TENSOR_dim_index = 0; \
- __TH_TENSOR_APPLYX_PREAMBLE(TYPE1, TENSOR1, DIM, 1) \
- __TH_TENSOR_APPLYX_PREAMBLE(TYPE2, TENSOR2, DIM, 1) \
- __TH_TENSOR_APPLYX_PREAMBLE(TYPE3, TENSOR3, DIM, 1) \
- \
- int elements_equal = 1; \
- if(TENSOR1##_n != TENSOR2##_n) { \
- elements_equal = 0; \
- } \
- else if(TENSOR1##_n != TENSOR3##_n) { \
- elements_equal = 0; \
- } \
- if (elements_equal == 0) { \
- THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
- THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
- THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->nDimension); \
- THError("inconsistent tensor size, expected %s %s, %s %s and %s %s to have the same " \
- "number of elements, but got %d, %d and %d elements respectively", \
- #TENSOR1, T1buff.str, #TENSOR2, T2buff.str, #TENSOR3, T3buff.str, \
- TENSOR1##_n, TENSOR2##_n, TENSOR3##_n); \
- } \
- \
- while(!TH_TENSOR_APPLY_hasFinished) \
- { \
- /* Loop through the innermost region of the Tensor */ \
- for(; TENSOR1##_i < TENSOR1##_size && TENSOR2##_i < TENSOR2##_size && TENSOR3##_i < TENSOR3##_size; TENSOR1##_i++, TENSOR2##_i++, TENSOR3##_i++, TENSOR1##_data += TENSOR1##_stride, TENSOR2##_data += TENSOR2##_stride, TENSOR3##_data += TENSOR3##_stride) /* 0, not TENSOR##_dim!
 */ \
- { \
- CODE \
- } \
- __TH_TENSOR_APPLYX_UPDATE_COUNTERS(TENSOR1, 0) \
- __TH_TENSOR_APPLYX_UPDATE_COUNTERS(TENSOR2, 0) \
- __TH_TENSOR_APPLYX_UPDATE_COUNTERS(TENSOR3, 0) \
- } \
- if(TENSOR1##_counter != NULL) \
- THFree(TENSOR1##_counter); \
- if(TENSOR2##_counter != NULL) \
- THFree(TENSOR2##_counter); \
- if(TENSOR3##_counter != NULL) \
- THFree(TENSOR3##_counter); \
-}
-
-#define TH_TENSOR_APPLY3(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
- TH_TENSOR_APPLY3_D(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, -1, CODE)
-
-#define TH_TENSOR_APPLY2_D(TYPE1, TENSOR1, TYPE2, TENSOR2, DIM, CODE) \
-{ \
- int TH_TENSOR_APPLY_hasFinished = 0; \
- long TH_TENSOR_dim_index = 0; \
- __TH_TENSOR_APPLYX_PREAMBLE(TYPE1, TENSOR1, DIM, 1) \
- __TH_TENSOR_APPLYX_PREAMBLE(TYPE2, TENSOR2, DIM, 1) \
-\
- if(TENSOR1##_n != TENSOR2##_n) { \
- THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \
- THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \
- THError("inconsistent tensor size, expected %s %s and %s %s to have the same " \
- "number of elements, but got %d and %d elements respectively", \
- #TENSOR1, T1buff.str, #TENSOR2, T2buff.str, TENSOR1##_n, TENSOR2##_n); \
- } \
- while(!TH_TENSOR_APPLY_hasFinished) \
- { \
- /* Loop through the innermost region of the Tensor */ \
- for(; TENSOR1##_i < TENSOR1##_size && TENSOR2##_i < TENSOR2##_size; TENSOR1##_i++, TENSOR2##_i++, TENSOR1##_data += TENSOR1##_stride, TENSOR2##_data += TENSOR2##_stride) /* 0, not TENSOR##_dim! */ \
- { \
- CODE \
- } \
- __TH_TENSOR_APPLYX_UPDATE_COUNTERS(TENSOR1, 0) \
- __TH_TENSOR_APPLYX_UPDATE_COUNTERS(TENSOR2, 0) \
- } \
- if(TENSOR1##_counter != NULL) \
- THFree(TENSOR1##_counter); \
- if(TENSOR2##_counter != NULL) \
- THFree(TENSOR2##_counter); \
-}
-
-#define TH_TENSOR_APPLY2(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
- TH_TENSOR_APPLY2_D(TYPE1, TENSOR1, TYPE2, TENSOR2, -1, CODE)
-
-#define TH_TENSOR_APPLY_D(TYPE, TENSOR, DIM, CODE) \
-{ \
- int TH_TENSOR_APPLY_hasFinished = 0; \
- long TH_TENSOR_dim_index = 0; \
- __TH_TENSOR_APPLYX_PREAMBLE(TYPE, TENSOR, DIM, 0) \
-\
- while(!TH_TENSOR_APPLY_hasFinished) \
- { \
- /* Loop through the innermost region of the Tensor */ \
- for(; TENSOR##_i < TENSOR##_size; TENSOR##_i++, TENSOR##_data += TENSOR##_stride) /* 0, not TENSOR##_dim!
*/ \ - { \ - CODE \ - } \ - __TH_TENSOR_APPLYX_UPDATE_COUNTERS(TENSOR, 1) \ - } \ - THFree(TENSOR##_counter); \ -} - -#define TH_TENSOR_APPLY(TYPE, TENSOR, CODE) \ - TH_TENSOR_APPLY_D(TYPE, TENSOR, -1, CODE) - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THTensorDimApply.h b/contrib/lua-torch/torch7/lib/TH/THTensorDimApply.h deleted file mode 100644 index 6727e1f7f0..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THTensorDimApply.h +++ /dev/null @@ -1,324 +0,0 @@ -#ifndef TH_TENSOR_DIM_APPLY_INC -#define TH_TENSOR_DIM_APPLY_INC - -#define TH_TENSOR_DIM_APPLY3(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, DIMENSION, CODE) \ -{ \ - TYPE1 *TENSOR1##_data = NULL; \ - long TENSOR1##_stride = 0, TENSOR1##_size = 0; \ - TYPE2 *TENSOR2##_data = NULL; \ - long TENSOR2##_stride = 0, TENSOR2##_size = 0; \ - TYPE3 *TENSOR3##_data = NULL; \ - long TENSOR3##_stride = 0, TENSOR3##_size = 0; \ - long *TH_TENSOR_DIM_APPLY_counter = NULL; \ - int TH_TENSOR_DIM_APPLY_hasFinished = 0; \ - int TH_TENSOR_DIM_APPLY_i; \ -\ - if( (DIMENSION < 0) || (DIMENSION >= TENSOR1->nDimension) ) \ - THError("invalid dimension %d (expected to be 0 <= dim < %d)", DIMENSION, TENSOR1->nDimension); \ - int same_dims = 1; \ - if( TENSOR1->nDimension != TENSOR2->nDimension ) { \ - same_dims = 0; \ - } \ - if( TENSOR1->nDimension != TENSOR3->nDimension ) { \ - same_dims = 0; \ - } \ - if (same_dims == 0) { \ - THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \ - THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \ - THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->nDimension); \ - THError("inconsistent tensor size, expected %s %s, %s %s and %s %s to have the same " \ - "number of dimensions", #TENSOR1, T1buff.str, #TENSOR2, T2buff.str, #TENSOR3, T3buff.str); \ - } \ - int shape_check_flag = 0; \ - for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \ - { \ - if(TH_TENSOR_DIM_APPLY_i == DIMENSION) \ - continue; \ - if(TENSOR1->size[TH_TENSOR_DIM_APPLY_i] != TENSOR2->size[TH_TENSOR_DIM_APPLY_i]) \ - shape_check_flag = 1; \ - if(TENSOR1->size[TH_TENSOR_DIM_APPLY_i] != TENSOR3->size[TH_TENSOR_DIM_APPLY_i]) \ - shape_check_flag = 1; \ - } \ - \ - if (shape_check_flag == 1) { \ - THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \ - THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \ - THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->nDimension); \ - THError("Expected %s %s, %s %s and %s %s to have the same size in dimension %d", \ - #TENSOR1, T1buff.str, #TENSOR2, T2buff.str, #TENSOR3, T3buff.str, DIMENSION); \ - } \ -\ - TH_TENSOR_DIM_APPLY_counter = (long*)THAlloc(sizeof(long)*(TENSOR1->nDimension)); \ - for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \ - TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] = 0; \ -\ - TENSOR1##_data = (TENSOR1)->storage->data+(TENSOR1)->storageOffset; \ - TENSOR1##_stride = (TENSOR1)->stride[DIMENSION]; \ - TENSOR1##_size = TENSOR1->size[DIMENSION]; \ -\ - TENSOR2##_data = (TENSOR2)->storage->data+(TENSOR2)->storageOffset; \ - TENSOR2##_stride = (TENSOR2)->stride[DIMENSION]; \ - TENSOR2##_size = TENSOR2->size[DIMENSION]; \ -\ - TENSOR3##_data = (TENSOR3)->storage->data+(TENSOR3)->storageOffset; \ - TENSOR3##_stride = (TENSOR3)->stride[DIMENSION]; \ - TENSOR3##_size = TENSOR3->size[DIMENSION]; \ -\ - while(!TH_TENSOR_DIM_APPLY_hasFinished) \ - { \ - CODE \ -\ - if(TENSOR1->nDimension == 1) \ - break; \ 
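- /* Advance the multi-index like an odometer, skipping DIMENSION: bump one */ \
- /* index, carry into the next when it wraps, finish when the last one wraps. */ \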
- \ - for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \ - { \ - if(TH_TENSOR_DIM_APPLY_i == DIMENSION) \ - { \ - if(TH_TENSOR_DIM_APPLY_i == TENSOR1->nDimension-1) \ - { \ - TH_TENSOR_DIM_APPLY_hasFinished = 1; \ - break; \ - } \ - continue; \ - } \ -\ - TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i]++; \ - TENSOR1##_data += TENSOR1->stride[TH_TENSOR_DIM_APPLY_i]; \ - TENSOR2##_data += TENSOR2->stride[TH_TENSOR_DIM_APPLY_i]; \ - TENSOR3##_data += TENSOR3->stride[TH_TENSOR_DIM_APPLY_i]; \ -\ - if(TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] == TENSOR1->size[TH_TENSOR_DIM_APPLY_i]) \ - { \ - if(TH_TENSOR_DIM_APPLY_i == TENSOR1->nDimension-1) \ - { \ - TH_TENSOR_DIM_APPLY_hasFinished = 1; \ - break; \ - } \ - else \ - { \ - TENSOR1##_data -= TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i]*TENSOR1->stride[TH_TENSOR_DIM_APPLY_i]; \ - TENSOR2##_data -= TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i]*TENSOR2->stride[TH_TENSOR_DIM_APPLY_i]; \ - TENSOR3##_data -= TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i]*TENSOR3->stride[TH_TENSOR_DIM_APPLY_i]; \ - TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] = 0; \ - } \ - } \ - else \ - break; \ - } \ - } \ - THFree(TH_TENSOR_DIM_APPLY_counter); \ -} - -/** - * Similar to DIM_APPLY(...) but we maintain two sets of pointers: one for the first tensor - * and one for the second. The two tensors must have the same shape, other than at the - * specified DIMENSION. This function makes it easy to store the output from reducing the - * TENSOR at index. For example, in the sum example described below, we could instead do: - * - * long i = 0; - * TYPE1 sum; - * - * for (i = 0; i < TENSOR1##_size; ++i) { - * sum += TENSOR1##_data[i * TENSOR1##_stride] - * } - * *TENSOR2##_data = (TYPE2) sum; - * - * In particular, we guarantee that the offset into TENSOR2 will be what you would get if - * you applied all of the index values used to generate the offset into TENSOR1. 
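- *
- * As a further sketch in the same spirit (not code taken from this tree), a
- * max reduction along DIMENSION could be written as:
- *
- * long i;
- * TYPE1 m = TENSOR1##_data[0];
- * for (i = 1; i < TENSOR1##_size; ++i) {
- *   if (TENSOR1##_data[i * TENSOR1##_stride] > m)
- *     m = TENSOR1##_data[i * TENSOR1##_stride];
- * }
- * *TENSOR2##_data = (TYPE2) m;
- *
- * with TENSOR2 sized 1 at DIMENSION, so each reduced value lands in its own cell.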
- */ -#define TH_TENSOR_DIM_APPLY2(TYPE1, TENSOR1, TYPE2, TENSOR2, DIMENSION, CODE) \ -{ \ - TYPE1 *TENSOR1##_data = NULL; \ - long TENSOR1##_stride = 0, TENSOR1##_size = 0; \ - TYPE2 *TENSOR2##_data = NULL; \ - long TENSOR2##_stride = 0, TENSOR2##_size = 0; \ - long *TH_TENSOR_DIM_APPLY_counter = NULL; \ - int TH_TENSOR_DIM_APPLY_hasFinished = 0; \ - int TH_TENSOR_DIM_APPLY_i; \ -\ - if( (DIMENSION < 0) || (DIMENSION >= TENSOR1->nDimension) ) \ - THError("invalid dimension %d (expected to be 0 <= dim < %d)", DIMENSION, TENSOR1->nDimension); \ - if( TENSOR1->nDimension != TENSOR2->nDimension ) { \ - THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \ - THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \ - THError("inconsistent tensor size, expected %s %s and %s %s to have the same " \ - "number of dimensions", #TENSOR1, T1buff.str, #TENSOR2, T2buff.str); \ - } \ - int shape_check_flag = 0; \ - for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \ - { \ - if(TH_TENSOR_DIM_APPLY_i == DIMENSION) \ - continue; \ - if(TENSOR1->size[TH_TENSOR_DIM_APPLY_i] != TENSOR2->size[TH_TENSOR_DIM_APPLY_i]) { \ - THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \ - THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \ - THError("Expected %s %s and %s %s to have the same size in dimension %d", \ - #TENSOR1, T1buff.str, #TENSOR2, T2buff.str, DIMENSION); \ - } \ - } \ -\ - TH_TENSOR_DIM_APPLY_counter = (long*)THAlloc(sizeof(long)*(TENSOR1->nDimension)); \ - for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \ - TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] = 0; \ -\ - TENSOR1##_data = (TENSOR1)->storage->data+(TENSOR1)->storageOffset; \ - TENSOR1##_stride = (TENSOR1)->stride[DIMENSION]; \ - TENSOR1##_size = TENSOR1->size[DIMENSION]; \ -\ - TENSOR2##_data = (TENSOR2)->storage->data+(TENSOR2)->storageOffset; \ - TENSOR2##_stride = (TENSOR2)->stride[DIMENSION]; \ - TENSOR2##_size = TENSOR2->size[DIMENSION]; \ -\ - while(!TH_TENSOR_DIM_APPLY_hasFinished) \ - { \ - CODE \ -\ - if(TENSOR1->nDimension == 1) \ - break; \ - \ - for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \ - { \ - if(TH_TENSOR_DIM_APPLY_i == DIMENSION) \ - { \ - if(TH_TENSOR_DIM_APPLY_i == TENSOR1->nDimension-1) \ - { \ - TH_TENSOR_DIM_APPLY_hasFinished = 1; \ - break; \ - } \ - continue; \ - } \ -\ - TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i]++; \ - TENSOR1##_data += TENSOR1->stride[TH_TENSOR_DIM_APPLY_i]; \ - TENSOR2##_data += TENSOR2->stride[TH_TENSOR_DIM_APPLY_i]; \ -\ - if(TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] == TENSOR1->size[TH_TENSOR_DIM_APPLY_i]) \ - { \ - if(TH_TENSOR_DIM_APPLY_i == TENSOR1->nDimension-1) \ - { \ - TH_TENSOR_DIM_APPLY_hasFinished = 1; \ - break; \ - } \ - else \ - { \ - TENSOR1##_data -= TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i]*TENSOR1->stride[TH_TENSOR_DIM_APPLY_i]; \ - TENSOR2##_data -= TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i]*TENSOR2->stride[TH_TENSOR_DIM_APPLY_i]; \ - TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] = 0; \ - } \ - } \ - else \ - break; \ - } \ - } \ - THFree(TH_TENSOR_DIM_APPLY_counter); \ -} - -/** - * The basic idea for DIM_APPLY: Given a TENSOR and a DIMENSION, provide access to the data stored - * at all sets of dimension values other than DIMENSION, such that we can get all the values at those - * fixed indices for the various 
values at DIMENSION.
- *
- * Suppose we have a 2x3x4 Tensor A, and we have DIMENSION=2. Then we will hit CODE (2x3) times, and the
- * pointer into storage will be at:
- *
- * A[0][0]
- * A[0][1]
- * A[0][2]
- * A[1][0]
- * A[1][1]
- * A[1][2]
- *
- * And at each point, we can access the data for each of the four elements of the Tensor via
- * TENSOR##_stride. So for example, if we wanted to sum the elements there, we could do:
- *
- * long i = 0;
- * TYPE sum;
- * for (i = 0; i < TENSOR##_size; i++) {
- *   sum += TENSOR##_data[i * TENSOR##_stride];
- * }
- *
- * Note that we don't have to have DIMENSION be the last dimension. If we have DIMENSION=1, then we will hit the
- * code (2x4) times, with pointer into the storage at:
- *
- * offset +
- * stride_0 * 0 + stride_2 * 0
- * stride_0 * 1 + stride_2 * 0
- * stride_0 * 0 + stride_2 * 1
- * stride_0 * 1 + stride_2 * 1
- * stride_0 * 0 + stride_2 * 2
- * stride_0 * 1 + stride_2 * 2
- * stride_0 * 0 + stride_2 * 3
- * stride_0 * 1 + stride_2 * 3
- *
- * So we can again sum over the values at DIMENSION with the other indices fixed.
- */
-#define TH_TENSOR_DIM_APPLY(TYPE, TENSOR, DIMENSION, CODE) \
-{ \
- TYPE *TENSOR##_data = NULL; \
- long TENSOR##_stride = 0, TENSOR##_size = 0; \
- long *TH_TENSOR_DIM_APPLY_counter = NULL; \
- int TH_TENSOR_DIM_APPLY_hasFinished = 0; \
- int TH_TENSOR_DIM_APPLY_i; \
-\
- if( (DIMENSION < 0) || (DIMENSION >= TENSOR->nDimension) ) \
- THError("invalid dimension"); \
-\
- TENSOR##_data = (TENSOR)->storage->data+(TENSOR)->storageOffset; \
- TENSOR##_stride = (TENSOR)->stride[DIMENSION]; \
- TENSOR##_size = TENSOR->size[DIMENSION]; \
- /* Counter stores the indices into the Tensor at any time */ \
- TH_TENSOR_DIM_APPLY_counter = (long*)THAlloc(sizeof(long)*(TENSOR->nDimension)); \
- for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR->nDimension; TH_TENSOR_DIM_APPLY_i++) \
- TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] = 0; \
-\
- while(!TH_TENSOR_DIM_APPLY_hasFinished) \
- { \
- CODE \
-\
- if(TENSOR->nDimension == 1) \
- break; \
- \
- for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR->nDimension; TH_TENSOR_DIM_APPLY_i++) \
- { \
- /* Check if the index is equal to DIMENSION. We don't need to update the */ \
- /* offset if this is the case, and can consider the next index. However, */ \
- /* in the case that the DIMENSION is the last index in the Tensor, then */ \
- /* we have parsed the entire tensor and can exit */ \
- if(TH_TENSOR_DIM_APPLY_i == DIMENSION) \
- { \
- if(TH_TENSOR_DIM_APPLY_i == TENSOR->nDimension-1) \
- { \
- TH_TENSOR_DIM_APPLY_hasFinished = 1; \
- break; \
- } \
- continue; \
- } \
-\
- /* Bump the counter at this index, update the pointer */ \
- TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i]++; \
- TENSOR##_data += TENSOR->stride[TH_TENSOR_DIM_APPLY_i]; \
-\
- if(TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] == TENSOR->size[TH_TENSOR_DIM_APPLY_i]) \
- { \
- /* Handled TENSOR_size(dim) iterations for DIM_APPLY_i.
If this is the last dimension, exit */ \ - if(TH_TENSOR_DIM_APPLY_i == TENSOR->nDimension-1) \ - { \ - TH_TENSOR_DIM_APPLY_hasFinished = 1; \ - break; \ - } \ - else \ - { \ - /* Reset the counter, and the pointer to the beginning of the storage for this combination of indices */ \ - TENSOR##_data -= TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i]*TENSOR->stride[TH_TENSOR_DIM_APPLY_i]; \ - TH_TENSOR_DIM_APPLY_counter[TH_TENSOR_DIM_APPLY_i] = 0; \ - } \ - } \ - else \ - break; \ - } \ - } \ - THFree(TH_TENSOR_DIM_APPLY_counter); \ -} - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THTensorMacros.h b/contrib/lua-torch/torch7/lib/TH/THTensorMacros.h deleted file mode 100644 index 15b67665e7..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THTensorMacros.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef TH_TENSOR_MACROS_INC -#define TH_TENSOR_MACROS_INC - -/* fast method to access to tensor data */ - -#define THTensor_fastGet1d(self, x0) \ - (((self)->storage->data+(self)->storageOffset)[(x0)*(self)->stride[0]]) - -#define THTensor_fastGet2d(self, x0, x1) \ - (((self)->storage->data+(self)->storageOffset)[(x0)*(self)->stride[0]+(x1)*(self)->stride[1]]) - -#define THTensor_fastGet3d(self, x0, x1, x2) \ - (((self)->storage->data+(self)->storageOffset)[(x0)*(self)->stride[0]+(x1)*(self)->stride[1]+(x2)*(self)->stride[2]]) - -#define THTensor_fastGet4d(self, x0, x1, x2, x3) \ - (((self)->storage->data+(self)->storageOffset)[(x0)*(self)->stride[0]+(x1)*(self)->stride[1]+(x2)*(self)->stride[2]+(x3)*(self)->stride[3]]) - -#define THTensor_fastSet1d(self, x0, value) \ - (((self)->storage->data+(self)->storageOffset)[(x0)*(self)->stride[0]] = value) - -#define THTensor_fastSet2d(self, x0, x1, value) \ - (((self)->storage->data+(self)->storageOffset)[(x0)*(self)->stride[0]+(x1)*(self)->stride[1]] = value) - -#define THTensor_fastSet3d(self, x0, x1, x2, value) \ - (((self)->storage->data+(self)->storageOffset)[(x0)*(self)->stride[0]+(x1)*(self)->stride[1]+(x2)*(self)->stride[2]] = value) - -#define THTensor_fastSet4d(self, x0, x1, x2, x3, value) \ - (((self)->storage->data+(self)->storageOffset)[(x0)*(self)->stride[0]+(x1)*(self)->stride[1]+(x2)*(self)->stride[2]+(x3)*(self)->stride[3]] = value) - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/THVector.c b/contrib/lua-torch/torch7/lib/TH/THVector.c deleted file mode 100644 index 4410578846..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THVector.c +++ /dev/null @@ -1,30 +0,0 @@ -#include "THVector.h" - -#include "generic/simd/simd.h" - -#ifdef __NEON__ -#include "vector/NEON.c" -#endif - -#ifdef __PPC64__ -#include "vector/VSX.c" -#endif - -#if defined(USE_SSE2) || defined(USE_SSE3) || defined(USE_SSSE3) \ - || defined(USE_SSE4_1) || defined(USE_SSE4_2) -#include "vector/SSE.c" -#endif - -#if defined(USE_AVX) -#include "vector/AVX.h" -#endif - -#if defined(USE_AVX2) -#include "vector/AVX2.h" -#endif - -#include "generic/THVectorDefault.c" -#include "THGenerateAllTypes.h" - -#include "generic/THVectorDispatch.c" -#include "THGenerateAllTypes.h" diff --git a/contrib/lua-torch/torch7/lib/TH/THVector.h b/contrib/lua-torch/torch7/lib/TH/THVector.h deleted file mode 100644 index e29917b93e..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/THVector.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef TH_VECTOR_INC -#define TH_VECTOR_INC - -#include "THGeneral.h" - -#define THVector_(NAME) TH_CONCAT_4(TH,Real,Vector_,NAME) - -/* We are going to use dynamic dispatch, and want only to generate declarations - * of the vector functions */ -#include 
"generic/THVector.h" -#include "THGenerateAllTypes.h" - -#endif // TH_VECTOR_INC diff --git a/contrib/lua-torch/torch7/lib/TH/cmake/FindARM.cmake b/contrib/lua-torch/torch7/lib/TH/cmake/FindARM.cmake deleted file mode 100644 index 2dcb2a24f2..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/cmake/FindARM.cmake +++ /dev/null @@ -1,76 +0,0 @@ -# Check if the processor is an ARM and if Neon instruction are available on the machine where -# the project is compiled. - -IF(CMAKE_SYSTEM_NAME MATCHES "Linux") - EXEC_PROGRAM(cat ARGS "/proc/cpuinfo" OUTPUT_VARIABLE CPUINFO) - - #neon instruction can be found on the majority part of modern ARM processor - STRING(REGEX REPLACE "^.*(neon).*$" "\\1" NEON_THERE ${CPUINFO}) - STRING(COMPARE EQUAL "neon" "${NEON_THERE}" NEON_TRUE) - IF (NEON_TRUE) - set(NEON_FOUND true CACHE BOOL "NEON available on host") - ELSE (NEON_TRUE) - set(NEON_FOUND false CACHE BOOL "NEON available on host") - ENDIF (NEON_TRUE) - - # on ARMv8, neon is inherit and instead listed as 'asimd' in /proc/cpuinfo - STRING(REGEX REPLACE "^.*(asimd).*$" "\\1" ASIMD_THERE ${CPUINFO}) - STRING(COMPARE EQUAL "asimd" "${ASIMD_THERE}" ASIMD_TRUE) - IF (ASIMD_TRUE) - set(ASIMD_FOUND true CACHE BOOL "ASIMD/NEON available on host") - ELSE (ASIMD_TRUE) - set(ASIMD_FOUND false CACHE BOOL "ASIMD/NEON available on host") - ENDIF (ASIMD_TRUE) - - #Find the processor type (for now OMAP3 or OMAP4) - STRING(REGEX REPLACE "^.*(OMAP3).*$" "\\1" OMAP3_THERE ${CPUINFO}) - STRING(COMPARE EQUAL "OMAP3" "${OMAP3_THERE}" OMAP3_TRUE) - IF (OMAP3_TRUE) - set(CORTEXA8_FOUND true CACHE BOOL "OMAP3 available on host") - ELSE (OMAP3_TRUE) - set(CORTEXA8_FOUND false CACHE BOOL "OMAP3 available on host") - ENDIF (OMAP3_TRUE) - - #Find the processor type (for now OMAP3 or OMAP4) - STRING(REGEX REPLACE "^.*(OMAP4).*$" "\\1" OMAP4_THERE ${CPUINFO}) - STRING(COMPARE EQUAL "OMAP4" "${OMAP4_THERE}" OMAP4_TRUE) - IF (OMAP4_TRUE) - set(CORTEXA9_FOUND true CACHE BOOL "OMAP4 available on host") - ELSE (OMAP4_TRUE) - set(CORTEXA9_FOUND false CACHE BOOL "OMAP4 available on host") - ENDIF (OMAP4_TRUE) - -ELSEIF(CMAKE_SYSTEM_NAME MATCHES "Darwin") - EXEC_PROGRAM("/usr/sbin/sysctl -n machdep.cpu.features" OUTPUT_VARIABLE - CPUINFO) - - #neon instruction can be found on the majority part of modern ARM processor - STRING(REGEX REPLACE "^.*(neon).*$" "\\1" NEON_THERE ${CPUINFO}) - STRING(COMPARE EQUAL "neon" "${NEON_THERE}" NEON_TRUE) - IF (NEON_TRUE) - set(NEON_FOUND true CACHE BOOL "NEON available on host") - ELSE (NEON_TRUE) - set(NEON_FOUND false CACHE BOOL "NEON available on host") - ENDIF (NEON_TRUE) - -ELSEIF(CMAKE_SYSTEM_NAME MATCHES "Windows") - # TODO - set(CORTEXA8_FOUND false CACHE BOOL "OMAP3 not available on host") - set(CORTEXA9_FOUND false CACHE BOOL "OMAP4 not available on host") - set(NEON_FOUND false CACHE BOOL "NEON not available on host") -ELSE(CMAKE_SYSTEM_NAME MATCHES "Linux") - set(CORTEXA8_FOUND false CACHE BOOL "OMAP3 not available on host") - set(CORTEXA9_FOUND false CACHE BOOL "OMAP4 not available on host") - set(NEON_FOUND false CACHE BOOL "NEON not available on host") -ENDIF(CMAKE_SYSTEM_NAME MATCHES "Linux") - -if(NOT NEON_FOUND) - MESSAGE(STATUS "Could not find hardware support for NEON on this machine.") -endif(NOT NEON_FOUND) -if(NOT CORTEXA8_FOUND) - MESSAGE(STATUS "No OMAP3 processor on this machine.") -endif(NOT CORTEXA8_FOUND) -if(NOT CORTEXA9_FOUND) - MESSAGE(STATUS "No OMAP4 processor on this machine.") -endif(NOT CORTEXA9_FOUND) -mark_as_advanced(NEON_FOUND) diff --git 
a/contrib/lua-torch/torch7/lib/TH/cmake/FindBLAS.cmake b/contrib/lua-torch/torch7/lib/TH/cmake/FindBLAS.cmake deleted file mode 100644 index 1f254d231c..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/cmake/FindBLAS.cmake +++ /dev/null @@ -1,309 +0,0 @@ -# - Find BLAS library
-# This module finds an installed fortran library that implements the BLAS
-# linear-algebra interface (see http://www.netlib.org/blas/).
-# The list of libraries searched for is taken
-# from the autoconf macro file, acx_blas.m4 (distributed at
-# http://ac-archive.sourceforge.net/ac-archive/acx_blas.html).
-#
-# This module sets the following variables:
-# BLAS_FOUND - set to true if a library implementing the BLAS interface is found.
-# BLAS_INFO - name of the detected BLAS library.
-# BLAS_F2C - set to true if following the f2c return convention
-# BLAS_LIBRARIES - list of libraries to link against to use BLAS
-# BLAS_INCLUDE_DIR - include directory
-
-# Do nothing if BLAS was found before
-IF(NOT BLAS_FOUND)
-
-SET(BLAS_LIBRARIES)
-SET(BLAS_INCLUDE_DIR)
-SET(BLAS_INFO)
-SET(BLAS_F2C)
-
-SET(WITH_BLAS "" CACHE STRING "Blas type [mkl/open/goto/acml/atlas/accelerate/veclib/generic]")
-
-# Old FindBlas
-INCLUDE(CheckCSourceRuns)
-INCLUDE(CheckFortranFunctionExists)
-
-MACRO(Check_Fortran_Libraries LIBRARIES _prefix _name _flags _list)
- # This macro checks for the existence of the combination of fortran libraries
- # given by _list. If the combination is found, this macro checks (using the
- # Check_Fortran_Function_Exists macro) whether we can link against that library
- # combination using the name of a routine given by _name using the linker
- # flags given by _flags. If the combination of libraries is found and passes
- # the link test, LIBRARIES is set to the list of complete library paths that
- # have been found. Otherwise, LIBRARIES is set to NOTFOUND.
- # N.B. _prefix is the prefix applied to the names of all cached variables that
- # are generated internally and marked advanced by this macro.
-
- set(__list)
- foreach(_elem ${_list})
- if(__list)
- set(__list "${__list} - ${_elem}")
- else(__list)
- set(__list "${_elem}")
- endif(__list)
- endforeach(_elem)
- message(STATUS "Checking for [${__list}]")
-
- set(_libraries_work TRUE)
- set(${LIBRARIES})
- set(_combined_name)
- foreach(_library ${_list})
- set(_combined_name ${_combined_name}_${_library})
- if(_libraries_work)
- if ( WIN32 )
- find_library(${_prefix}_${_library}_LIBRARY
- NAMES ${_library}
- PATHS ENV LIB
- PATHS ENV PATH )
- endif ( WIN32 )
- if ( APPLE )
- find_library(${_prefix}_${_library}_LIBRARY
- NAMES ${_library}
- PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64
- ENV DYLD_LIBRARY_PATH )
- else ( APPLE )
- find_library(${_prefix}_${_library}_LIBRARY
- NAMES ${_library}
- PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64
- ENV LD_LIBRARY_PATH )
- endif( APPLE )
- mark_as_advanced(${_prefix}_${_library}_LIBRARY)
- set(${LIBRARIES} ${${LIBRARIES}} ${${_prefix}_${_library}_LIBRARY})
- set(_libraries_work ${${_prefix}_${_library}_LIBRARY})
- MESSAGE(STATUS " Library ${_library}: ${${_prefix}_${_library}_LIBRARY}")
- endif(_libraries_work)
- endforeach(_library ${_list})
- if(_libraries_work)
- # Test this combination of libraries.
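- # (The link test compiles a tiny program that calls the routine named by _name,
- # e.g. sgemm in the checks below, and caches the outcome in
- # ${_prefix}${_combined_name}_WORKS.)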
- set(CMAKE_REQUIRED_LIBRARIES ${_flags} ${${LIBRARIES}}) - if (CMAKE_Fortran_COMPILER_WORKS) - check_fortran_function_exists(${_name} ${_prefix}${_combined_name}_WORKS) - else (CMAKE_Fortran_COMPILER_WORKS) - check_function_exists("${_name}_" ${_prefix}${_combined_name}_WORKS) - endif (CMAKE_Fortran_COMPILER_WORKS) - set(CMAKE_REQUIRED_LIBRARIES) - mark_as_advanced(${_prefix}${_combined_name}_WORKS) - set(_libraries_work ${${_prefix}${_combined_name}_WORKS}) - endif(_libraries_work) - if(NOT _libraries_work) - set(${LIBRARIES} NOTFOUND) - endif(NOT _libraries_work) -endmacro(Check_Fortran_Libraries) - -# Intel MKL? -if((NOT BLAS_LIBRARIES) - AND ((NOT WITH_BLAS) OR (WITH_BLAS STREQUAL "mkl"))) - FIND_PACKAGE(MKL) - IF(MKL_FOUND) - SET(BLAS_INFO "mkl") - SET(BLAS_LIBRARIES ${MKL_LIBRARIES}) - SET(BLAS_INCLUDE_DIR ${MKL_INCLUDE_DIR}) - SET(BLAS_VERSION ${MKL_VERSION}) - ENDIF(MKL_FOUND) -endif() - -if((NOT BLAS_LIBRARIES) - AND ((NOT WITH_BLAS) OR (WITH_BLAS STREQUAL "open"))) - check_fortran_libraries( - BLAS_LIBRARIES - BLAS - sgemm - "" - "openblas") - if(BLAS_LIBRARIES) - set(BLAS_INFO "open") - endif(BLAS_LIBRARIES) -endif() - -if((NOT BLAS_LIBRARIES) - AND ((NOT WITH_BLAS) OR (WITH_BLAS STREQUAL "open"))) - check_fortran_libraries( - BLAS_LIBRARIES - BLAS - sgemm - "" - "openblas;pthread") - if(BLAS_LIBRARIES) - set(BLAS_INFO "open") - endif(BLAS_LIBRARIES) -endif() - -if((NOT BLAS_LIBRARIES) AND (WIN32) - AND ((NOT WITH_BLAS) OR (WITH_BLAS STREQUAL "open"))) - check_fortran_libraries( - BLAS_LIBRARIES - BLAS - sgemm - "" - "libopenblas") - if(BLAS_LIBRARIES) - set(BLAS_INFO "open") - endif(BLAS_LIBRARIES) -endif() - -if((NOT BLAS_LIBRARIES) - AND ((NOT WITH_BLAS) OR (WITH_BLAS STREQUAL "goto"))) - check_fortran_libraries( - BLAS_LIBRARIES - BLAS - sgemm - "" - "goto2;gfortran") - if (BLAS_LIBRARIES) - set(BLAS_INFO "goto") - endif (BLAS_LIBRARIES) -endif() - -if((NOT BLAS_LIBRARIES) - AND ((NOT WITH_BLAS) OR (WITH_BLAS STREQUAL "goto"))) - check_fortran_libraries( - BLAS_LIBRARIES - BLAS - sgemm - "" - "goto2;gfortran;pthread") - if (BLAS_LIBRARIES) - set(BLAS_INFO "goto") - endif (BLAS_LIBRARIES) -endif() - -if((NOT BLAS_LIBRARIES) - AND ((NOT WITH_BLAS) OR (WITH_BLAS STREQUAL "acml"))) - check_fortran_libraries( - BLAS_LIBRARIES - BLAS - sgemm - "" - "acml;gfortran") - if (BLAS_LIBRARIES) - set(BLAS_INFO "acml") - endif (BLAS_LIBRARIES) -endif() - -# Apple BLAS library? -if((NOT BLAS_LIBRARIES) - AND ((NOT WITH_BLAS) OR (WITH_BLAS STREQUAL "accelerate"))) - check_fortran_libraries( - BLAS_LIBRARIES - BLAS - sgemm - "" - "Accelerate") - if (BLAS_LIBRARIES) - set(BLAS_INFO "accelerate") - set(BLAS_IS_ACCELERATE 1) - endif (BLAS_LIBRARIES) -endif() - -if((NOT BLAS_LIBRARIES) - AND ((NOT WITH_BLAS) OR (WITH_BLAS STREQUAL "veclib"))) - check_fortran_libraries( - BLAS_LIBRARIES - BLAS - sgemm - "" - "vecLib") - if (BLAS_LIBRARIES) - set(BLAS_INFO "veclib") - endif (BLAS_LIBRARIES) -endif() - -# BLAS in ATLAS library? (http://math-atlas.sourceforge.net/) -if((NOT BLAS_LIBRARIES) - AND ((NOT WITH_BLAS) OR (WITH_BLAS STREQUAL "atlas"))) - check_fortran_libraries( - BLAS_LIBRARIES - BLAS - sgemm - "" - "ptf77blas;atlas;gfortran") - if (BLAS_LIBRARIES) - set(BLAS_INFO "atlas") - endif (BLAS_LIBRARIES) -endif() - -# Generic BLAS library? 
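-# (A library found as plain "blas" may really be OpenBLAS; the extra probe for
-# openblas_get_num_threads below reclassifies BLAS_INFO as "open" in that case,
-# so that LAPACK detection takes the matching path.)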
-if((NOT BLAS_LIBRARIES)
- AND ((NOT WITH_BLAS) OR (WITH_BLAS STREQUAL "generic")))
- check_fortran_libraries(
- BLAS_LIBRARIES
- BLAS
- sgemm
- ""
- "blas")
- if (BLAS_LIBRARIES)
- check_fortran_libraries(
- TMP_BLAS_LIBRARIES
- TMP_BLAS
- openblas_get_num_threads
- ""
- "blas")
- if (TMP_BLAS_LIBRARIES)
- set(BLAS_INFO "open")
- else()
- set(BLAS_INFO "generic")
- endif()
- endif (BLAS_LIBRARIES)
-endif()
-
-# Determine if blas was compiled with the f2c conventions
-IF (BLAS_LIBRARIES)
- SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES})
- CHECK_C_SOURCE_RUNS("
-#include <stdio.h>
-#include <stdlib.h>
-float x[4] = { 1, 2, 3, 4 };
-float y[4] = { .1, .01, .001, .0001 };
-int four = 4;
-int one = 1;
-extern double sdot_();
-int main() {
- int i;
- double r = sdot_(&four, x, &one, y, &one);
- exit((float)r != (float).1234);
-}" BLAS_F2C_DOUBLE_WORKS )
- CHECK_C_SOURCE_RUNS("
-#include <stdio.h>
-#include <stdlib.h>
-float x[4] = { 1, 2, 3, 4 };
-float y[4] = { .1, .01, .001, .0001 };
-int four = 4;
-int one = 1;
-extern float sdot_();
-int main() {
- int i;
- double r = sdot_(&four, x, &one, y, &one);
- exit((float)r != (float).1234);
-}" BLAS_F2C_FLOAT_WORKS )
- IF (BLAS_F2C_DOUBLE_WORKS AND NOT BLAS_F2C_FLOAT_WORKS)
- MESSAGE(STATUS "This BLAS uses the F2C return conventions")
- SET(BLAS_F2C TRUE)
- ELSE (BLAS_F2C_DOUBLE_WORKS AND NOT BLAS_F2C_FLOAT_WORKS)
- SET(BLAS_F2C FALSE)
- ENDIF (BLAS_F2C_DOUBLE_WORKS AND NOT BLAS_F2C_FLOAT_WORKS)
-ENDIF(BLAS_LIBRARIES)
-
-# epilogue
-
-if(BLAS_LIBRARIES)
- set(BLAS_FOUND TRUE)
-else(BLAS_LIBRARIES)
- set(BLAS_FOUND FALSE)
-endif(BLAS_LIBRARIES)
-
-IF (NOT BLAS_FOUND AND BLAS_FIND_REQUIRED)
- message(FATAL_ERROR "Cannot find a library with BLAS API. Please specify library location.")
-ENDIF (NOT BLAS_FOUND AND BLAS_FIND_REQUIRED)
-IF(NOT BLAS_FIND_QUIETLY)
- IF(BLAS_FOUND)
- MESSAGE(STATUS "Found a library with BLAS API (${BLAS_INFO}).")
- ELSE(BLAS_FOUND)
- MESSAGE(STATUS "Cannot find a library with BLAS API. Not using BLAS.")
- ENDIF(BLAS_FOUND)
-ENDIF(NOT BLAS_FIND_QUIETLY)
-
-# Do nothing if BLAS was found before
-ENDIF(NOT BLAS_FOUND) diff --git a/contrib/lua-torch/torch7/lib/TH/cmake/FindLAPACK.cmake b/contrib/lua-torch/torch7/lib/TH/cmake/FindLAPACK.cmake deleted file mode 100644 index 9eca0730fc..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/cmake/FindLAPACK.cmake +++ /dev/null @@ -1,190 +0,0 @@ -# - Find LAPACK library
-# This module finds an installed fortran library that implements the LAPACK
-# linear-algebra interface (see http://www.netlib.org/lapack/).
-#
-# The approach follows that taken for the autoconf macro file, acx_lapack.m4
-# (distributed at http://ac-archive.sourceforge.net/ac-archive/acx_lapack.html).
-# -# This module sets the following variables: -# LAPACK_FOUND - set to true if a library implementing the LAPACK interface is found -# LAPACK_LIBRARIES - list of libraries (using full path name) for LAPACK - -# Note: I do not think it is a good idea to mixup different BLAS/LAPACK versions -# Hence, this script wants to find a Lapack library matching your Blas library - -# Do nothing if LAPACK was found before -IF(NOT LAPACK_FOUND) - -SET(LAPACK_LIBRARIES) -SET(LAPACK_INFO) - -IF(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED) - FIND_PACKAGE(BLAS) -ELSE(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED) - FIND_PACKAGE(BLAS REQUIRED) -ENDIF(LAPACK_FIND_QUIETLY OR NOT LAPACK_FIND_REQUIRED) - -# Old search lapack script -include(CheckFortranFunctionExists) - -macro(Check_Lapack_Libraries LIBRARIES _prefix _name _flags _list _blas) - # This macro checks for the existence of the combination of fortran libraries - # given by _list. If the combination is found, this macro checks (using the - # Check_Fortran_Function_Exists macro) whether can link against that library - # combination using the name of a routine given by _name using the linker - # flags given by _flags. If the combination of libraries is found and passes - # the link test, LIBRARIES is set to the list of complete library paths that - # have been found. Otherwise, LIBRARIES is set to FALSE. - # N.B. _prefix is the prefix applied to the names of all cached variables that - # are generated internally and marked advanced by this macro. - set(_libraries_work TRUE) - set(${LIBRARIES}) - set(_combined_name) - foreach(_library ${_list}) - set(_combined_name ${_combined_name}_${_library}) - if(_libraries_work) - if (WIN32) - find_library(${_prefix}_${_library}_LIBRARY - NAMES ${_library} PATHS ENV LIB PATHS ENV PATH) - else (WIN32) - if(APPLE) - find_library(${_prefix}_${_library}_LIBRARY - NAMES ${_library} - PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64 - ENV DYLD_LIBRARY_PATH) - else(APPLE) - find_library(${_prefix}_${_library}_LIBRARY - NAMES ${_library} - PATHS /usr/local/lib /usr/lib /usr/local/lib64 /usr/lib64 - ENV LD_LIBRARY_PATH) - endif(APPLE) - endif(WIN32) - mark_as_advanced(${_prefix}_${_library}_LIBRARY) - set(${LIBRARIES} ${${LIBRARIES}} ${${_prefix}_${_library}_LIBRARY}) - set(_libraries_work ${${_prefix}_${_library}_LIBRARY}) - endif(_libraries_work) - endforeach(_library ${_list}) - if(_libraries_work) - # Test this combination of libraries. 
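- # (Same linking probe as in FindBLAS.cmake, except that the routine checked is
- # a Lapack one, cheev in the calls below, and the Blas libraries passed via
- # _blas are appended to the link line.)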
- set(CMAKE_REQUIRED_LIBRARIES ${_flags} ${${LIBRARIES}} ${_blas}) - if (CMAKE_Fortran_COMPILER_WORKS) - check_fortran_function_exists(${_name} ${_prefix}${_combined_name}_WORKS) - else (CMAKE_Fortran_COMPILER_WORKS) - check_function_exists("${_name}_" ${_prefix}${_combined_name}_WORKS) - endif (CMAKE_Fortran_COMPILER_WORKS) - set(CMAKE_REQUIRED_LIBRARIES) - mark_as_advanced(${_prefix}${_combined_name}_WORKS) - set(_libraries_work ${${_prefix}${_combined_name}_WORKS}) - endif(_libraries_work) - if(NOT _libraries_work) - set(${LIBRARIES} FALSE) - endif(NOT _libraries_work) -endmacro(Check_Lapack_Libraries) - - -if(BLAS_FOUND) - - # Intel MKL - IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "mkl")) - IF(MKL_LAPACK_LIBRARIES) - SET(LAPACK_LIBRARIES ${MKL_LAPACK_LIBRARIES} ${MKL_LIBRARIES}) - ELSE(MKL_LAPACK_LIBRARIES) - SET(LAPACK_LIBRARIES ${MKL_LIBRARIES}) - ENDIF(MKL_LAPACK_LIBRARIES) - SET(LAPACK_INCLUDE_DIR ${MKL_INCLUDE_DIR}) - SET(LAPACK_INFO "mkl") - ENDIF() - - # OpenBlas - IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "open")) - SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES}) - check_function_exists("cheev_" OPEN_LAPACK_WORKS) - if(OPEN_LAPACK_WORKS) - SET(LAPACK_INFO "open") - else() - message(STATUS "It seems OpenBlas has not been compiled with Lapack support") - endif() - endif() - - # GotoBlas - IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "goto")) - SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES}) - check_function_exists("cheev_" GOTO_LAPACK_WORKS) - if(GOTO_LAPACK_WORKS) - SET(LAPACK_INFO "goto") - else() - message(STATUS "It seems GotoBlas has not been compiled with Lapack support") - endif() - endif() - - # ACML - IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "acml")) - SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES}) - check_function_exists("cheev_" ACML_LAPACK_WORKS) - if(ACML_LAPACK_WORKS) - SET(LAPACK_INFO "acml") - else() - message(STATUS "Strangely, this ACML library does not support Lapack?!") - endif() - endif() - - # Accelerate - IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "accelerate")) - SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES}) - check_function_exists("cheev_" ACCELERATE_LAPACK_WORKS) - if(ACCELERATE_LAPACK_WORKS) - SET(LAPACK_INFO "accelerate") - else() - message(STATUS "Strangely, this Accelerate library does not support Lapack?!") - endif() - endif() - - # vecLib - IF((NOT LAPACK_INFO) AND (BLAS_INFO STREQUAL "veclib")) - SET(CMAKE_REQUIRED_LIBRARIES ${BLAS_LIBRARIES}) - check_function_exists("cheev_" VECLIB_LAPACK_WORKS) - if(VECLIB_LAPACK_WORKS) - SET(LAPACK_INFO "veclib") - else() - message(STATUS "Strangely, this vecLib library does not support Lapack?!") - endif() - endif() - - # Generic LAPACK library? - IF((NOT LAPACK_INFO) AND ((BLAS_INFO STREQUAL "generic") OR (BLAS_INFO STREQUAL "open"))) - check_lapack_libraries( - LAPACK_LIBRARIES - LAPACK - cheev - "" - "lapack" - "${BLAS_LIBRARIES}" - ) - if(LAPACK_LIBRARIES) - SET(LAPACK_INFO "generic") - endif(LAPACK_LIBRARIES) - endif() - -else(BLAS_FOUND) - message(STATUS "LAPACK requires BLAS") -endif(BLAS_FOUND) - -if(LAPACK_INFO) - set(LAPACK_FOUND TRUE) -else(LAPACK_INFO) - set(LAPACK_FOUND FALSE) -endif(LAPACK_INFO) - -IF (NOT LAPACK_FOUND AND LAPACK_FIND_REQUIRED) - message(FATAL_ERROR "Cannot find a library with LAPACK API. Please specify library location.") -ENDIF (NOT LAPACK_FOUND AND LAPACK_FIND_REQUIRED) -IF(NOT LAPACK_FIND_QUIETLY) - IF(LAPACK_FOUND) - MESSAGE(STATUS "Found a library with LAPACK API. (${LAPACK_INFO})") - ELSE(LAPACK_FOUND) - MESSAGE(STATUS "Cannot find a library with LAPACK API. 
Not using LAPACK.") - ENDIF(LAPACK_FOUND) -ENDIF(NOT LAPACK_FIND_QUIETLY) - -# Do nothing if LAPACK was found before -ENDIF(NOT LAPACK_FOUND) diff --git a/contrib/lua-torch/torch7/lib/TH/cmake/FindMKL.cmake b/contrib/lua-torch/torch7/lib/TH/cmake/FindMKL.cmake deleted file mode 100644 index 08b4509853..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/cmake/FindMKL.cmake +++ /dev/null @@ -1,272 +0,0 @@ -# - Find INTEL MKL library -# -# This module finds the Intel Mkl libraries. -# -# This module sets the following variables: -# MKL_FOUND - set to true if a library implementing the CBLAS interface is found -# MKL_VERSION - best guess -# MKL_INCLUDE_DIR - path to include dir. -# MKL_LIBRARIES - list of libraries for base mkl -# MKL_LAPACK_LIBRARIES - list of libraries to add for lapack -# MKL_SCALAPACK_LIBRARIES - list of libraries to add for scalapack -# MKL_SOLVER_LIBRARIES - list of libraries to add for the solvers -# MKL_CDFT_LIBRARIES - list of libraries to add for the solvers - - -# Do nothing if MKL_FOUND was set before! -IF (NOT MKL_FOUND) - -SET(MKL_VERSION) -SET(MKL_INCLUDE_DIR) -SET(MKL_LIBRARIES) -SET(MKL_LAPACK_LIBRARIES) -SET(MKL_SCALAPACK_LIBRARIES) -SET(MKL_SOLVER_LIBRARIES) -SET(MKL_CDFT_LIBRARIES) - -# Includes -INCLUDE(CheckTypeSize) -INCLUDE(CheckFunctionExists) - -# Intel Compiler Suite -SET(INTEL_COMPILER_DIR CACHE STRING - "Root directory of the Intel Compiler Suite (contains ipp, mkl, etc.)") -SET(INTEL_MKL_DIR CACHE STRING - "Root directory of the Intel MKL (standalone)") -SET(INTEL_MKL_SEQUENTIAL OFF CACHE BOOL - "Force using the sequential (non threaded) libraries") - -# Checks -CHECK_TYPE_SIZE("void*" SIZE_OF_VOIDP) -IF ("${SIZE_OF_VOIDP}" EQUAL 8) - SET(mklvers "em64t") - SET(iccvers "intel64") - SET(mkl64s "_lp64") -ELSE ("${SIZE_OF_VOIDP}" EQUAL 8) - SET(mklvers "32") - SET(iccvers "ia32") - SET(mkl64s) -ENDIF ("${SIZE_OF_VOIDP}" EQUAL 8) -IF(CMAKE_COMPILER_IS_GNUCC) - SET(mklthreads "mkl_gnu_thread" "mkl_intel_thread") - SET(mklifaces "gf" "intel") - SET(mklrtls "iomp5") -ELSE(CMAKE_COMPILER_IS_GNUCC) - SET(mklthreads "mkl_intel_thread") - SET(mklifaces "intel") - SET(mklrtls "iomp5" "guide") - IF (MSVC) - SET(mklrtls "libiomp5md") - ENDIF (MSVC) -ENDIF (CMAKE_COMPILER_IS_GNUCC) - -# Kernel libraries dynamically loaded -SET(mklkerlibs "mc" "mc3" "nc" "p4n" "p4m" "p4m3" "p4p" "def") -SET(mklseq) - - - -# Paths -SET(saved_CMAKE_LIBRARY_PATH ${CMAKE_LIBRARY_PATH}) -SET(saved_CMAKE_INCLUDE_PATH ${CMAKE_INCLUDE_PATH}) -IF (INTEL_COMPILER_DIR) - # TODO: diagnostic if dir does not exist - SET(CMAKE_LIBRARY_PATH ${CMAKE_LIBRARY_PATH} - "${INTEL_COMPILER_DIR}/lib/${iccvers}") - IF (NOT INTEL_MKL_DIR) - SET(INTEL_MKL_DIR "${INTEL_COMPILER_DIR}/mkl") - ENDIF (NOT INTEL_MKL_DIR) -ENDIF (INTEL_COMPILER_DIR) -IF (INTEL_MKL_DIR) - # TODO: diagnostic if dir does not exist - SET(CMAKE_INCLUDE_PATH ${CMAKE_INCLUDE_PATH} - "${INTEL_MKL_DIR}/include") - SET(CMAKE_LIBRARY_PATH ${CMAKE_LIBRARY_PATH} - "${INTEL_MKL_DIR}/lib/${mklvers}") - IF (MSVC) - SET(CMAKE_LIBRARY_PATH ${CMAKE_LIBRARY_PATH} - "${INTEL_MKL_DIR}/lib/${iccvers}") - ENDIF (MSVC) -ENDIF (INTEL_MKL_DIR) - -# Try linking multiple libs -MACRO(CHECK_ALL_LIBRARIES LIBRARIES _name _list _flags) - # This macro checks for the existence of the combination of libraries given by _list. - # If the combination is found, this macro whether we can link against that library - # combination using the name of a routine given by _name using the linker - # flags given by _flags. 
If the combination of libraries is found and passes - # the link test, LIBRARIES is set to the list of complete library paths that - # have been found. Otherwise, LIBRARIES is set to FALSE. - # N.B. _prefix is the prefix applied to the names of all cached variables that - # are generated internally and marked advanced by this macro. - SET(_prefix "${LIBRARIES}") - # start checking - SET(_libraries_work TRUE) - SET(${LIBRARIES}) - SET(_combined_name) - SET(_paths) - set(__list) - foreach(_elem ${_list}) - if(__list) - set(__list "${__list} - ${_elem}") - else(__list) - set(__list "${_elem}") - endif(__list) - endforeach(_elem) - message(STATUS "Checking for [${__list}]") - FOREACH(_library ${_list}) - SET(_combined_name ${_combined_name}_${_library}) - IF(_libraries_work) - FIND_LIBRARY(${_prefix}_${_library}_LIBRARY NAMES ${_library}) - MARK_AS_ADVANCED(${_prefix}_${_library}_LIBRARY) - SET(${LIBRARIES} ${${LIBRARIES}} ${${_prefix}_${_library}_LIBRARY}) - SET(_libraries_work ${${_prefix}_${_library}_LIBRARY}) - IF(${_prefix}_${_library}_LIBRARY) - MESSAGE(STATUS " Library ${_library}: ${${_prefix}_${_library}_LIBRARY}") - ELSE(${_prefix}_${_library}_LIBRARY) - MESSAGE(STATUS " Library ${_library}: not found") - ENDIF(${_prefix}_${_library}_LIBRARY) - ENDIF(_libraries_work) - ENDFOREACH(_library ${_list}) - # Test this combination of libraries. - IF(_libraries_work) - SET(CMAKE_REQUIRED_LIBRARIES ${_flags} ${${LIBRARIES}}) - CHECK_FUNCTION_EXISTS(${_name} ${_prefix}${_combined_name}_WORKS) - SET(CMAKE_REQUIRED_LIBRARIES) - MARK_AS_ADVANCED(${_prefix}${_combined_name}_WORKS) - SET(_libraries_work ${${_prefix}${_combined_name}_WORKS}) - ENDIF(_libraries_work) - # Fin - IF(_libraries_work) - ELSE (_libraries_work) - SET(${LIBRARIES}) - MARK_AS_ADVANCED(${LIBRARIES}) - ENDIF(_libraries_work) -ENDMACRO(CHECK_ALL_LIBRARIES) - -if(WIN32) - set(mkl_m "") -else(WIN32) - set(mkl_m "m") -endif(WIN32) - - -# Check for version 10/11 -IF (NOT MKL_LIBRARIES) - SET(MKL_VERSION 1011) -ENDIF (NOT MKL_LIBRARIES) -FOREACH(mklrtl ${mklrtls} "") - FOREACH(mkliface ${mklifaces}) - FOREACH(mkl64 ${mkl64s} "") - FOREACH(mklthread ${mklthreads}) - IF (NOT MKL_LIBRARIES AND NOT INTEL_MKL_SEQUENTIAL) - CHECK_ALL_LIBRARIES(MKL_LIBRARIES cblas_sgemm - "mkl_${mkliface}${mkl64};${mklthread};mkl_core;${mklrtl};pthread;${mkl_m}" "") - ENDIF (NOT MKL_LIBRARIES AND NOT INTEL_MKL_SEQUENTIAL) - ENDFOREACH(mklthread) - ENDFOREACH(mkl64) - ENDFOREACH(mkliface) -ENDFOREACH(mklrtl) -FOREACH(mklrtl ${mklrtls} "") - FOREACH(mkliface ${mklifaces}) - FOREACH(mkl64 ${mkl64s} "") - IF (NOT MKL_LIBRARIES) - CHECK_ALL_LIBRARIES(MKL_LIBRARIES cblas_sgemm - "mkl_${mkliface}${mkl64};mkl_sequential;mkl_core;${mkl_m}" "") - IF (MKL_LIBRARIES) - SET(mklseq "_sequential") - ENDIF (MKL_LIBRARIES) - ENDIF (NOT MKL_LIBRARIES) - ENDFOREACH(mkl64) - ENDFOREACH(mkliface) -ENDFOREACH(mklrtl) -FOREACH(mklrtl ${mklrtls} "") - FOREACH(mkliface ${mklifaces}) - FOREACH(mkl64 ${mkl64s} "") - FOREACH(mklthread ${mklthreads}) - IF (NOT MKL_LIBRARIES) - CHECK_ALL_LIBRARIES(MKL_LIBRARIES cblas_sgemm - "mkl_${mkliface}${mkl64};${mklthread};mkl_core;${mklrtl};pthread;${mkl_m}" "") - ENDIF (NOT MKL_LIBRARIES) - ENDFOREACH(mklthread) - ENDFOREACH(mkl64) - ENDFOREACH(mkliface) -ENDFOREACH(mklrtl) - -# Check for older versions -IF (NOT MKL_LIBRARIES) - SET(MKL_VERSION 900) - CHECK_ALL_LIBRARIES(MKL_LIBRARIES cblas_sgemm - "mkl;guide;pthread;m" "") -ENDIF (NOT MKL_LIBRARIES) - -# Include files -IF (MKL_LIBRARIES) - FIND_PATH(MKL_INCLUDE_DIR "mkl_cblas.h") - 
MARK_AS_ADVANCED(MKL_INCLUDE_DIR) -ENDIF (MKL_LIBRARIES) - -# Other libraries -IF (MKL_LIBRARIES) - FOREACH(mkl64 ${mkl64s} "_core" "") - FOREACH(mkls ${mklseq} "") - IF (NOT MKL_LAPACK_LIBRARIES) - FIND_LIBRARY(MKL_LAPACK_LIBRARIES NAMES "mkl_lapack${mkl64}${mkls}") - MARK_AS_ADVANCED(MKL_LAPACK_LIBRARIES) - ENDIF (NOT MKL_LAPACK_LIBRARIES) - IF (NOT MKL_SCALAPACK_LIBRARIES) - FIND_LIBRARY(MKL_SCALAPACK_LIBRARIES NAMES "mkl_scalapack${mkl64}${mkls}") - MARK_AS_ADVANCED(MKL_SCALAPACK_LIBRARIES) - ENDIF (NOT MKL_SCALAPACK_LIBRARIES) - IF (NOT MKL_SOLVER_LIBRARIES) - FIND_LIBRARY(MKL_SOLVER_LIBRARIES NAMES "mkl_solver${mkl64}${mkls}") - MARK_AS_ADVANCED(MKL_SOLVER_LIBRARIES) - ENDIF (NOT MKL_SOLVER_LIBRARIES) - IF (NOT MKL_CDFT_LIBRARIES) - FIND_LIBRARY(MKL_CDFT_LIBRARIES NAMES "mkl_cdft${mkl64}${mkls}") - MARK_AS_ADVANCED(MKL_CDFT_LIBRARIES) - ENDIF (NOT MKL_CDFT_LIBRARIES) - ENDFOREACH(mkls) - ENDFOREACH(mkl64) -ENDIF (MKL_LIBRARIES) - -# LibIRC: intel compiler always links this; -# gcc does not; but mkl kernels sometimes need it. -IF (MKL_LIBRARIES) - IF (CMAKE_COMPILER_IS_GNUCC) - FIND_LIBRARY(MKL_KERNEL_libirc "irc") - ELSEIF (CMAKE_C_COMPILER_ID AND NOT CMAKE_C_COMPILER_ID STREQUAL "Intel") - FIND_LIBRARY(MKL_KERNEL_libirc "irc") - ENDIF (CMAKE_COMPILER_IS_GNUCC) - MARK_AS_ADVANCED(MKL_KERNEL_libirc) - IF (MKL_KERNEL_libirc) - SET(MKL_LIBRARIES ${MKL_LIBRARIES} ${MKL_KERNEL_libirc}) - ENDIF (MKL_KERNEL_libirc) -ENDIF (MKL_LIBRARIES) - -# Final -SET(CMAKE_LIBRARY_PATH ${saved_CMAKE_LIBRARY_PATH}) -SET(CMAKE_INCLUDE_PATH ${saved_CMAKE_INCLUDE_PATH}) -IF (MKL_LIBRARIES) - SET(MKL_FOUND TRUE) -ELSE (MKL_LIBRARIES) - SET(MKL_FOUND FALSE) - SET(MKL_VERSION) -ENDIF (MKL_LIBRARIES) - -# Standard termination -IF(NOT MKL_FOUND AND MKL_FIND_REQUIRED) - MESSAGE(FATAL_ERROR "MKL library not found. Please specify library location") -ENDIF(NOT MKL_FOUND AND MKL_FIND_REQUIRED) -IF(NOT MKL_FIND_QUIETLY) - IF(MKL_FOUND) - MESSAGE(STATUS "MKL library found") - ELSE(MKL_FOUND) - MESSAGE(STATUS "MKL library not found") - ENDIF(MKL_FOUND) -ENDIF(NOT MKL_FIND_QUIETLY) - -# Do nothing if MKL_FOUND was set before! 
-ENDIF (NOT MKL_FOUND)
-
- diff --git a/contrib/lua-torch/torch7/lib/TH/cmake/FindSSE.cmake b/contrib/lua-torch/torch7/lib/TH/cmake/FindSSE.cmake deleted file mode 100644 index a14abe8d4c..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/cmake/FindSSE.cmake +++ /dev/null @@ -1,125 +0,0 @@ -INCLUDE(CheckCSourceRuns)
-INCLUDE(CheckCXXSourceRuns)
-
-SET(SSE1_CODE "
- #include <xmmintrin.h>
-
- int main()
- {
- __m128 a;
- float vals[4] = {0,0,0,0};
- a = _mm_loadu_ps(vals);
- return 0;
- }")
-
-SET(SSE2_CODE "
- #include <emmintrin.h>
-
- int main()
- {
- __m128d a;
- double vals[2] = {0,0};
- a = _mm_loadu_pd(vals);
- return 0;
- }")
-
-SET(SSE3_CODE "
- #include <pmmintrin.h>
-
- int main( )
- {
- const int vals[4] = {0,0,0,0};
- __m128i a;
- a = _mm_lddqu_si128( (const __m128i*)vals );
- return 0;
- }")
-
-SET(SSE4_1_CODE "
- #include <smmintrin.h>
-
- int main ()
- {
- __m128i a = {0,0,0,0}, b = {0,0,0,0};
- __m128i res = _mm_max_epi8(a, b);
-
- return 0;
- }
-")
-
-SET(SSE4_2_CODE "
- #include <nmmintrin.h>
-
- int main()
- {
- __m128i a = {0,0,0,0}, b = {0,0,0,0}, c = {0,0,0,0};
- c = _mm_cmpgt_epi64(a, b);
- return 0;
- }
-")
-
-SET(AVX_CODE "
- #include <immintrin.h>
-
- int main()
- {
- __m256 a;
- a = _mm256_set1_ps(0);
- return 0;
- }
-")
-
-SET(AVX2_CODE "
- #include <immintrin.h>
-
- int main()
- {
- __m256i a = {0};
- a = _mm256_abs_epi16(a);
- return 0;
- }
-")
-
-MACRO(CHECK_SSE lang type flags)
- SET(__FLAG_I 1)
- SET(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
- FOREACH(__FLAG ${flags})
- IF(NOT ${lang}_${type}_FOUND)
- SET(CMAKE_REQUIRED_FLAGS ${__FLAG})
- IF(lang STREQUAL "CXX")
- CHECK_CXX_SOURCE_RUNS("${${type}_CODE}" ${lang}_HAS_${type}_${__FLAG_I})
- ELSE()
- CHECK_C_SOURCE_RUNS("${${type}_CODE}" ${lang}_HAS_${type}_${__FLAG_I})
- ENDIF()
- IF(${lang}_HAS_${type}_${__FLAG_I})
- SET(${lang}_${type}_FOUND TRUE CACHE BOOL "${lang} ${type} support")
- SET(${lang}_${type}_FLAGS "${__FLAG}" CACHE STRING "${lang} ${type} flags")
- ENDIF()
- MATH(EXPR __FLAG_I "${__FLAG_I}+1")
- ENDIF()
- ENDFOREACH()
- SET(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
-
- IF(NOT ${lang}_${type}_FOUND)
- SET(${lang}_${type}_FOUND FALSE CACHE BOOL "${lang} ${type} support")
- SET(${lang}_${type}_FLAGS "" CACHE STRING "${lang} ${type} flags")
- ENDIF()
-
- MARK_AS_ADVANCED(${lang}_${type}_FOUND ${lang}_${type}_FLAGS)
-
-ENDMACRO()
-
-CHECK_SSE(C "SSE1" " ;-msse;/arch:SSE")
-CHECK_SSE(C "SSE2" " ;-msse2;/arch:SSE2")
-CHECK_SSE(C "SSE3" " ;-msse3;/arch:SSE3")
-CHECK_SSE(C "SSE4_1" " ;-msse4.1;-msse4;/arch:SSE4")
-CHECK_SSE(C "SSE4_2" " ;-msse4.2;-msse4;/arch:SSE4")
-CHECK_SSE(C "AVX" " ;-mavx;/arch:AVX")
-CHECK_SSE(C "AVX2" " ;-mavx2 -mfma;/arch:AVX2")
-
-CHECK_SSE(CXX "SSE1" " ;-msse;/arch:SSE")
-CHECK_SSE(CXX "SSE2" " ;-msse2;/arch:SSE2")
-CHECK_SSE(CXX "SSE3" " ;-msse3;/arch:SSE3")
-CHECK_SSE(CXX "SSE4_1" " ;-msse4.1;-msse4;/arch:SSE4")
-CHECK_SSE(CXX "SSE4_2" " ;-msse4.2;-msse4;/arch:SSE4")
-CHECK_SSE(CXX "AVX" " ;-mavx;/arch:AVX")
-CHECK_SSE(CXX "AVX2" " ;-mavx2 -mfma;/arch:AVX2") diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THBlas.c b/contrib/lua-torch/torch7/lib/TH/generic/THBlas.c deleted file mode 100644 index b04931f346..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THBlas.c +++ /dev/null @@ -1,412 +0,0 @@ -#ifndef TH_GENERIC_FILE
-#define TH_GENERIC_FILE "generic/THBlas.c"
-#else
-
-
-#ifdef BLAS_F2C
-# define ffloat double
-#else
-# define ffloat float
-#endif
-
-TH_EXTERNC void dswap_(int *n, double *x, int *incx, double *y, int *incy);
-TH_EXTERNC void sswap_(int *n, float *x, int *incx, float *y, int *incy);
-TH_EXTERNC void dscal_(int *n, double
*a, double *x, int *incx); -TH_EXTERNC void sscal_(int *n, float *a, float *x, int *incx); -TH_EXTERNC void dcopy_(int *n, double *x, int *incx, double *y, int *incy); -TH_EXTERNC void scopy_(int *n, float *x, int *incx, float *y, int *incy); -TH_EXTERNC void daxpy_(int *n, double *a, double *x, int *incx, double *y, int *incy); -TH_EXTERNC void saxpy_(int *n, float *a, float *x, int *incx, float *y, int *incy); -TH_EXTERNC double ddot_(int *n, double *x, int *incx, double *y, int *incy); -TH_EXTERNC ffloat sdot_(int *n, float *x, int *incx, float *y, int *incy); -TH_EXTERNC void dgemv_(char *trans, int *m, int *n, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy); -TH_EXTERNC void sgemv_(char *trans, int *m, int *n, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy); -TH_EXTERNC void dger_(int *m, int *n, double *alpha, double *x, int *incx, double *y, int *incy, double *a, int *lda); -TH_EXTERNC void sger_(int *m, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *a, int *lda); -TH_EXTERNC void dgemm_(char *transa, char *transb, int *m, int *n, int *k, double *alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c, int *ldc); -TH_EXTERNC void sgemm_(char *transa, char *transb, int *m, int *n, int *k, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc); - - - -void THBlas_(swap)(long n, real *x, long incx, real *y, long incy) -{ - if(n == 1) - { - incx = 1; - incy = 1; - } - -#if defined(USE_BLAS) && (defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT)) - if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) - { - int i_n = (int)n; - int i_incx = (int)incx; - int i_incy = (int)incy; - -#if defined(TH_REAL_IS_DOUBLE) - dswap_(&i_n, x, &i_incx, y, &i_incy); -#else - sswap_(&i_n, x, &i_incx, y, &i_incy); -#endif - return; - } -#endif - { - long i; - for(i = 0; i < n; i++) - { - real z = x[i*incx]; - x[i*incx] = y[i*incy]; - y[i*incy] = z; - } - } -} - -void THBlas_(scal)(long n, real a, real *x, long incx) -{ - if(n == 1) - incx = 1; - -#if defined(USE_BLAS) && (defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT)) - if( (n <= INT_MAX) && (incx <= INT_MAX) ) - { - int i_n = (int)n; - int i_incx = (int)incx; - -#if defined(TH_REAL_IS_DOUBLE) - dscal_(&i_n, &a, x, &i_incx); -#else - sscal_(&i_n, &a, x, &i_incx); -#endif - return; - } -#endif - { - long i; - for(i = 0; i < n; i++) { - if (a == 0) { - x[i*incx] = 0; - } else { - x[i*incx] *= a; - } - } - } -} - -void THBlas_(copy)(long n, real *x, long incx, real *y, long incy) -{ - if(n == 1) - { - incx = 1; - incy = 1; - } - -#if defined(USE_BLAS) && (defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT)) - if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) - { - int i_n = (int)n; - int i_incx = (int)incx; - int i_incy = (int)incy; - -#if defined(TH_REAL_IS_DOUBLE) - dcopy_(&i_n, x, &i_incx, y, &i_incy); -#else - scopy_(&i_n, x, &i_incx, y, &i_incy); -#endif - return; - } -#endif - { - long i; - for(i = 0; i < n; i++) - y[i*incy] = x[i*incx]; - } -} - -void THBlas_(axpy)(long n, real a, real *x, long incx, real *y, long incy) -{ - if(n == 1) - { - incx = 1; - incy = 1; - } - -#if defined(USE_BLAS) && (defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT)) - if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) - { - int i_n = (int)n; - int i_incx = (int)incx; - int i_incy = (int)incy; - -#if defined(TH_REAL_IS_DOUBLE) - daxpy_(&i_n, &a, x, &i_incx, y, 
&i_incy); -#else - saxpy_(&i_n, &a, x, &i_incx, y, &i_incy); -#endif - return; - } -#endif - { - long i; - for(i = 0; i < n; i++) - y[i*incy] += a*x[i*incx]; - } -} - -real THBlas_(dot)(long n, real *x, long incx, real *y, long incy) -{ - if(n == 1) - { - incx = 1; - incy = 1; - } - -#if defined(USE_BLAS) && (defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT)) - if( (n <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) - { - int i_n = (int)n; - int i_incx = (int)incx; - int i_incy = (int)incy; - -#if defined(TH_REAL_IS_DOUBLE) - return (real) ddot_(&i_n, x, &i_incx, y, &i_incy); -#else - return (real) sdot_(&i_n, x, &i_incx, y, &i_incy); -#endif - } -#endif - { - long i; - real sum = 0; - for(i = 0; i < n; i++) - sum += x[i*incx]*y[i*incy]; - return sum; - } -} - -void THBlas_(gemv)(char trans, long m, long n, real alpha, real *a, long lda, real *x, long incx, real beta, real *y, long incy) -{ - if(n == 1) - lda = m; - -#if defined(USE_BLAS) && (defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT)) - if( (m <= INT_MAX) && (n <= INT_MAX) && - (lda > 0) && (lda <= INT_MAX) && - (incx > 0) && (incx <= INT_MAX) && - (incy > 0) && (incy <= INT_MAX) ) - { - int i_m = (int)m; - int i_n = (int)n; - int i_lda = (int)lda; - int i_incx = (int)incx; - int i_incy = (int)incy; - -#if defined(TH_REAL_IS_DOUBLE) - dgemv_(&trans, &i_m, &i_n, &alpha, a, &i_lda, x, &i_incx, &beta, y, &i_incy); -#else - sgemv_(&trans, &i_m, &i_n, &alpha, a, &i_lda, x, &i_incx, &beta, y, &i_incy); -#endif - return; - } -#endif - { - long i, j; - - if( (trans == 'T') || (trans == 't') ) - { - for(i = 0; i < n; i++) - { - real sum = 0; - real *row_ = a+lda*i; - for(j = 0; j < m; j++) - sum += x[j*incx]*row_[j]; - if (beta == 0) - y[i*incy] = alpha*sum; - else - y[i*incy] = beta*y[i*incy] + alpha*sum; - } - } - else - { - if(beta != 1) - THBlas_(scal)(m, beta, y, incy); - - for(j = 0; j < n; j++) - { - real *column_ = a+lda*j; - real z = alpha*x[j*incx]; - for(i = 0; i < m; i++) - y[i*incy] += z*column_[i]; - } - } - } -} - -void THBlas_(ger)(long m, long n, real alpha, real *x, long incx, real *y, long incy, real *a, long lda) -{ - if(n == 1) - lda = m; - -#if defined(USE_BLAS) && (defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT)) - if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) ) - { - int i_m = (int)m; - int i_n = (int)n; - int i_lda = (int)lda; - int i_incx = (int)incx; - int i_incy = (int)incy; - -#if defined(TH_REAL_IS_DOUBLE) - dger_(&i_m, &i_n, &alpha, x, &i_incx, y, &i_incy, a, &i_lda); -#else - sger_(&i_m, &i_n, &alpha, x, &i_incx, y, &i_incy, a, &i_lda); -#endif - return; - } -#endif - { - long i, j; - for(j = 0; j < n; j++) - { - real *column_ = a+j*lda; - real z = alpha*y[j*incy]; - for(i = 0; i < m; i++) - column_[i] += z*x[i*incx] ; - } - } -} - -void THBlas_(gemm)(char transa, char transb, long m, long n, long k, real alpha, real *a, long lda, real *b, long ldb, real beta, real *c, long ldc) -{ - int transa_ = ((transa == 't') || (transa == 'T')); - int transb_ = ((transb == 't') || (transb == 'T')); - - if(n == 1) - ldc = m; - - if(transa_) - { - if(m == 1) - lda = k; - } - else - { - if(k == 1) - lda = m; - } - - if(transb_) - { - if(k == 1) - ldb = n; - } - else - { - if(n == 1) - ldb = k; - } - -#if defined(USE_BLAS) && (defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_FLOAT)) - if( (m <= INT_MAX) && (n <= INT_MAX) && (k <= INT_MAX) && (lda <= INT_MAX) && (ldb <= INT_MAX) && (ldc <= INT_MAX) ) - { - int i_m = (int)m; - int i_n = (int)n; - 
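/*
 * [editor's note -- not part of the original torch7 sources] Reference
 * Fortran BLAS takes 32-bit integer arguments, which is why each
 * wrapper narrows its long sizes and strides to int only after
 * checking them against INT_MAX, and otherwise falls back to the
 * plain C loops. Those fallbacks index column-major storage, where
 * element (i, j) of an m-by-n matrix with leading dimension lda lives
 * at a[j*lda + i]. Self-contained illustration (names are the
 * editor's):
 *
 *   #include <assert.h>
 *
 *   static double colmajor_get(const double *a, long lda, long i, long j)
 *   {
 *     return a[j*lda + i];
 *   }
 *
 *   int main(void)
 *   {
 *     // 2x3 matrix stored column by column: (1,2), (3,4), (5,6)
 *     double a[6] = {1, 2, 3, 4, 5, 6};
 *     assert(colmajor_get(a, 2, 0, 1) == 3);  // row 0, column 1
 *     assert(colmajor_get(a, 2, 1, 2) == 6);  // row 1, column 2
 *     return 0;
 *   }
 */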
int i_k = (int)k; - int i_lda = (int)lda; - int i_ldb = (int)ldb; - int i_ldc = (int)ldc; - -#if defined(TH_REAL_IS_DOUBLE) - dgemm_(&transa, &transb, &i_m, &i_n, &i_k, &alpha, a, &i_lda, b, &i_ldb, &beta, c, &i_ldc); -#else - sgemm_(&transa, &transb, &i_m, &i_n, &i_k, &alpha, a, &i_lda, b, &i_ldb, &beta, c, &i_ldc); -#endif - return; - } -#endif - { - long i, j, l; - if(!transa_ && !transb_) - { - real *a_ = a; - for(i = 0; i < m; i++) - { - real *b_ = b; - for(j = 0; j < n; j++) - { - real sum = 0; - for(l = 0; l < k; l++) - sum += a_[l*lda]*b_[l]; - b_ += ldb; - if (beta == 0) - c[j*ldc+i] = alpha*sum; - else - c[j*ldc+i] = beta*c[j*ldc+i]+alpha*sum; - } - a_++; - } - } - else if(transa_ && !transb_) - { - real *a_ = a; - for(i = 0; i < m; i++) - { - real *b_ = b; - for(j = 0; j < n; j++) - { - real sum = 0; - for(l = 0; l < k; l++) - sum += a_[l]*b_[l]; - b_ += ldb; - if (beta == 0) - c[j*ldc+i] = alpha*sum; - else - c[j*ldc+i] = beta*c[j*ldc+i]+alpha*sum; - } - a_ += lda; - } - } - else if(!transa_ && transb_) - { - real *a_ = a; - for(i = 0; i < m; i++) - { - real *b_ = b; - for(j = 0; j < n; j++) - { - real sum = 0; - for(l = 0; l < k; l++) - sum += a_[l*lda]*b_[l*ldb]; - b_++; - if (beta == 0) - c[j*ldc+i] = alpha*sum; - else - c[j*ldc+i] = beta*c[j*ldc+i]+alpha*sum; - } - a_++; - } - } - else - { - real *a_ = a; - for(i = 0; i < m; i++) - { - real *b_ = b; - for(j = 0; j < n; j++) - { - real sum = 0; - for(l = 0; l < k; l++) - sum += a_[l]*b_[l*ldb]; - b_++; - if (beta == 0) - c[j*ldc+i] = alpha*sum; - else - c[j*ldc+i] = beta*c[j*ldc+i]+alpha*sum; - } - a_ += lda; - } - } - } -} - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THBlas.h b/contrib/lua-torch/torch7/lib/TH/generic/THBlas.h deleted file mode 100644 index 9e14f5a844..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THBlas.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THBlas.h" -#else - -/* Level 1 */ -TH_API void THBlas_(swap)(long n, real *x, long incx, real *y, long incy); -TH_API void THBlas_(scal)(long n, real a, real *x, long incx); -TH_API void THBlas_(copy)(long n, real *x, long incx, real *y, long incy); -TH_API void THBlas_(axpy)(long n, real a, real *x, long incx, real *y, long incy); -TH_API real THBlas_(dot)(long n, real *x, long incx, real *y, long incy); - -/* Level 2 */ -TH_API void THBlas_(gemv)(char trans, long m, long n, real alpha, real *a, long lda, real *x, long incx, real beta, real *y, long incy); -TH_API void THBlas_(ger)(long m, long n, real alpha, real *x, long incx, real *y, long incy, real *a, long lda); - -/* Level 3 */ -TH_API void THBlas_(gemm)(char transa, char transb, long m, long n, long k, real alpha, real *a, long lda, real *b, long ldb, real beta, real *c, long ldc); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THLapack.c b/contrib/lua-torch/torch7/lib/TH/generic/THLapack.c deleted file mode 100644 index 148ae26c4b..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THLapack.c +++ /dev/null @@ -1,270 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THLapack.c" -#else - - -TH_EXTERNC void dgesv_(int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, int *info); -TH_EXTERNC void sgesv_(int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, int *info); -TH_EXTERNC void dtrtrs_(char *uplo, char *trans, char *diag, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, int *info); -TH_EXTERNC void strtrs_(char *uplo, char *trans, char *diag, 
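/*
 * [editor's note -- not part of the original torch7 sources] These
 * externs bind the Fortran LAPACK symbols directly: the trailing
 * underscore comes from Fortran 77 name mangling, every argument is
 * passed by pointer, matrices are column-major, and each routine
 * reports status through the final info argument (0 = success,
 * negative = bad argument, positive = numerical failure).
 * Self-contained sketch, assuming a LAPACK library is linked
 * (e.g. -llapack):
 *
 *   #include <stdio.h>
 *
 *   extern void dgesv_(int *n, int *nrhs, double *a, int *lda,
 *                      int *ipiv, double *b, int *ldb, int *info);
 *
 *   int main(void)
 *   {
 *     // solve [2 1; 1 3] * x = [3; 5], column-major storage
 *     double a[4] = {2, 1, 1, 3};
 *     double b[2] = {3, 5};
 *     int n = 2, nrhs = 1, ipiv[2], info;
 *     dgesv_(&n, &nrhs, a, &n, ipiv, b, &n, &info);
 *     if (info == 0)
 *       printf("x = (%g, %g)\n", b[0], b[1]);  // prints x = (0.8, 1.4)
 *     return info;
 *   }
 */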
int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, int *info); -TH_EXTERNC void dgels_(char *trans, int *m, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *work, int *lwork, int *info); -TH_EXTERNC void sgels_(char *trans, int *m, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, float *work, int *lwork, int *info); -TH_EXTERNC void dsyev_(char *jobz, char *uplo, int *n, double *a, int *lda, double *w, double *work, int *lwork, int *info); -TH_EXTERNC void ssyev_(char *jobz, char *uplo, int *n, float *a, int *lda, float *w, float *work, int *lwork, int *info); -TH_EXTERNC void dgeev_(char *jobvl, char *jobvr, int *n, double *a, int *lda, double *wr, double *wi, double* vl, int *ldvl, double *vr, int *ldvr, double *work, int *lwork, int *info); -TH_EXTERNC void sgeev_(char *jobvl, char *jobvr, int *n, float *a, int *lda, float *wr, float *wi, float* vl, int *ldvl, float *vr, int *ldvr, float *work, int *lwork, int *info); -TH_EXTERNC void dgesvd_(char *jobu, char *jobvt, int *m, int *n, double *a, int *lda, double *s, double *u, int *ldu, double *vt, int *ldvt, double *work, int *lwork, int *info); -TH_EXTERNC void sgesvd_(char *jobu, char *jobvt, int *m, int *n, float *a, int *lda, float *s, float *u, int *ldu, float *vt, int *ldvt, float *work, int *lwork, int *info); -TH_EXTERNC void dgetrf_(int *m, int *n, double *a, int *lda, int *ipiv, int *info); -TH_EXTERNC void sgetrf_(int *m, int *n, float *a, int *lda, int *ipiv, int *info); -TH_EXTERNC void dgetrs_(char *trans, int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, int *info); -TH_EXTERNC void sgetrs_(char *trans, int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, int *info); -TH_EXTERNC void dgetri_(int *n, double *a, int *lda, int *ipiv, double *work, int *lwork, int *info); -TH_EXTERNC void sgetri_(int *n, float *a, int *lda, int *ipiv, float *work, int *lwork, int *info); -TH_EXTERNC void dpotrf_(char *uplo, int *n, double *a, int *lda, int *info); -TH_EXTERNC void spotrf_(char *uplo, int *n, float *a, int *lda, int *info); -TH_EXTERNC void dpotri_(char *uplo, int *n, double *a, int *lda, int *info); -TH_EXTERNC void spotri_(char *uplo, int *n, float *a, int *lda, int *info); -TH_EXTERNC void dpotrs_(char *uplo, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, int *info); -TH_EXTERNC void spotrs_(char *uplo, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, int *info); -TH_EXTERNC void sgeqrf_(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info); -TH_EXTERNC void dgeqrf_(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info); -TH_EXTERNC void sorgqr_(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *lwork, int *info); -TH_EXTERNC void dorgqr_(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *lwork, int *info); -TH_EXTERNC void sormqr_(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info); -TH_EXTERNC void dormqr_(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info); -TH_EXTERNC void spstrf_(char *uplo, int *n, float *a, int *lda, int *piv, int *rank, float *tol, float *work, int *info); -TH_EXTERNC void dpstrf_(char *uplo, int *n, double *a, int *lda, int *piv, int *rank, double *tol, double *work, int *info); - - -/* Compute the solution to a real 
system of linear equations A * X = B */ -void THLapack_(gesv)(int n, int nrhs, real *a, int lda, int *ipiv, real *b, int ldb, int* info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dgesv_(&n, &nrhs, a, &lda, ipiv, b, &ldb, info); -#else - sgesv_(&n, &nrhs, a, &lda, ipiv, b, &ldb, info); -#endif -#else - THError("gesv : Lapack library not found in compile time\n"); -#endif - return; -} - -/* Solve a triangular system of the form A * X = B or A^T * X = B */ -void THLapack_(trtrs)(char uplo, char trans, char diag, int n, int nrhs, real *a, int lda, real *b, int ldb, int* info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dtrtrs_(&uplo, &trans, &diag, &n, &nrhs, a, &lda, b, &ldb, info); -#else - strtrs_(&uplo, &trans, &diag, &n, &nrhs, a, &lda, b, &ldb, info); -#endif -#else - THError("trtrs : Lapack library not found in compile time\n"); -#endif - return; -} - -/* Solve overdetermined or underdetermined real linear systems involving an -M-by-N matrix A, or its transpose, using a QR or LQ factorization of A */ -void THLapack_(gels)(char trans, int m, int n, int nrhs, real *a, int lda, real *b, int ldb, real *work, int lwork, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dgels_(&trans, &m, &n, &nrhs, a, &lda, b, &ldb, work, &lwork, info); -#else - sgels_(&trans, &m, &n, &nrhs, a, &lda, b, &ldb, work, &lwork, info); -#endif -#else - THError("gels : Lapack library not found in compile time\n"); -#endif -} - -/* Compute all eigenvalues and, optionally, eigenvectors of a real symmetric -matrix A */ -void THLapack_(syev)(char jobz, char uplo, int n, real *a, int lda, real *w, real *work, int lwork, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dsyev_(&jobz, &uplo, &n, a, &lda, w, work, &lwork, info); -#else - ssyev_(&jobz, &uplo, &n, a, &lda, w, work, &lwork, info); -#endif -#else - THError("syev : Lapack library not found in compile time\n"); -#endif -} - -/* Compute for an N-by-N real nonsymmetric matrix A, the eigenvalues and, -optionally, the left and/or right eigenvectors */ -void THLapack_(geev)(char jobvl, char jobvr, int n, real *a, int lda, real *wr, real *wi, real* vl, int ldvl, real *vr, int ldvr, real *work, int lwork, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dgeev_(&jobvl, &jobvr, &n, a, &lda, wr, wi, vl, &ldvl, vr, &ldvr, work, &lwork, info); -#else - sgeev_(&jobvl, &jobvr, &n, a, &lda, wr, wi, vl, &ldvl, vr, &ldvr, work, &lwork, info); -#endif -#else - THError("geev : Lapack library not found in compile time\n"); -#endif -} - -/* Compute the singular value decomposition (SVD) of a real M-by-N matrix A, -optionally computing the left and/or right singular vectors */ -void THLapack_(gesvd)(char jobu, char jobvt, int m, int n, real *a, int lda, real *s, real *u, int ldu, real *vt, int ldvt, real *work, int lwork, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dgesvd_( &jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork, info); -#else - sgesvd_( &jobu, &jobvt, &m, &n, a, &lda, s, u, &ldu, vt, &ldvt, work, &lwork, info); -#endif -#else - THError("gesvd : Lapack library not found in compile time\n"); -#endif -} - -/* LU decomposition */ -void THLapack_(getrf)(int m, int n, real *a, int lda, int *ipiv, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dgetrf_(&m, &n, a, &lda, ipiv, info); -#else - sgetrf_(&m, &n, a, &lda, ipiv, info); -#endif -#else - THError("getrf : Lapack library not found in compile time\n"); -#endif -} - -void 
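/*
 * [editor's note -- not part of the original torch7 sources] Several
 * wrappers above (gels, syev, geev, gesvd) and below (getri, geqrf,
 * orgqr, ormqr) pass LAPACK's workspace protocol through unchanged:
 * call once with lwork == -1 so LAPACK writes the optimal size into
 * work[0], then allocate and call again. Hedged sketch of that
 * two-call dance around dsyev_, assuming LAPACK is linked:
 *
 *   #include <stdlib.h>
 *
 *   extern void dsyev_(char *jobz, char *uplo, int *n, double *a,
 *                      int *lda, double *w, double *work, int *lwork,
 *                      int *info);
 *
 *   static int eigenvalues_sym(double *a, int n, double *w)
 *   {
 *     char jobz = 'N', uplo = 'U';
 *     double wkopt;
 *     int lwork = -1, info;
 *     dsyev_(&jobz, &uplo, &n, a, &n, w, &wkopt, &lwork, &info); // query
 *     if (info != 0) return info;
 *     lwork = (int)wkopt;
 *     double *work = malloc(sizeof(double) * lwork);
 *     if (!work) return -1;
 *     dsyev_(&jobz, &uplo, &n, a, &n, w, work, &lwork, &info);   // solve
 *     free(work);
 *     return info;
 *   }
 */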
THLapack_(getrs)(char trans, int n, int nrhs, real *a, int lda, int *ipiv, real *b, int ldb, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dgetrs_(&trans, &n, &nrhs, a, &lda, ipiv, b, &ldb, info); -#else - sgetrs_(&trans, &n, &nrhs, a, &lda, ipiv, b, &ldb, info); -#endif -#else - THError("getrs : Lapack library not found in compile time\n"); -#endif -} - -/* Matrix Inverse */ -void THLapack_(getri)(int n, real *a, int lda, int *ipiv, real *work, int lwork, int* info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dgetri_(&n, a, &lda, ipiv, work, &lwork, info); -#else - sgetri_(&n, a, &lda, ipiv, work, &lwork, info); -#endif -#else - THError("getri : Lapack library not found in compile time\n"); -#endif -} - -/* Cholesky factorization */ -void THLapack_(potrf)(char uplo, int n, real *a, int lda, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dpotrf_(&uplo, &n, a, &lda, info); -#else - spotrf_(&uplo, &n, a, &lda, info); -#endif -#else - THError("potrf : Lapack library not found in compile time\n"); -#endif -} - -/* Solve A*X = B with a symmetric positive definite matrix A using the Cholesky factorization */ -void THLapack_(potrs)(char uplo, int n, int nrhs, real *a, int lda, real *b, int ldb, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dpotrs_(&uplo, &n, &nrhs, a, &lda, b, &ldb, info); -#else - spotrs_(&uplo, &n, &nrhs, a, &lda, b, &ldb, info); -#endif -#else - THError("potrs: Lapack library not found in compile time\n"); -#endif -} - -/* Cholesky factorization based Matrix Inverse */ -void THLapack_(potri)(char uplo, int n, real *a, int lda, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dpotri_(&uplo, &n, a, &lda, info); -#else - spotri_(&uplo, &n, a, &lda, info); -#endif -#else - THError("potri: Lapack library not found in compile time\n"); -#endif -} - -/* Cholesky factorization with complete pivoting */ -void THLapack_(pstrf)(char uplo, int n, real *a, int lda, int *piv, int *rank, real tol, real *work, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dpstrf_(&uplo, &n, a, &lda, piv, rank, &tol, work, info); -#else - spstrf_(&uplo, &n, a, &lda, piv, rank, &tol, work, info); -#endif -#else - THError("pstrf: Lapack library not found at compile time\n"); -#endif -} - -/* QR decomposition */ -void THLapack_(geqrf)(int m, int n, real *a, int lda, real *tau, real *work, int lwork, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dgeqrf_(&m, &n, a, &lda, tau, work, &lwork, info); -#else - sgeqrf_(&m, &n, a, &lda, tau, work, &lwork, info); -#endif -#else - THError("geqrf: Lapack library not found in compile time\n"); -#endif -} - -/* Build Q from output of geqrf */ -void THLapack_(orgqr)(int m, int n, int k, real *a, int lda, real *tau, real *work, int lwork, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dorgqr_(&m, &n, &k, a, &lda, tau, work, &lwork, info); -#else - sorgqr_(&m, &n, &k, a, &lda, tau, work, &lwork, info); -#endif -#else - THError("orgqr: Lapack library not found in compile time\n"); -#endif -} - -/* Multiply Q with a matrix using the output of geqrf */ -void THLapack_(ormqr)(char side, char trans, int m, int n, int k, real *a, int lda, real *tau, real *c, int ldc, real *work, int lwork, int *info) -{ -#ifdef USE_LAPACK -#if defined(TH_REAL_IS_DOUBLE) - dormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, &lwork, info); -#else - sormqr_(&side, &trans, &m, &n, &k, a, &lda, tau, c, &ldc, work, &lwork, info); -#endif -#else 
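/*
 * [editor's note -- not part of the original torch7 sources] When
 * USE_LAPACK is not defined, every wrapper in this file lands in a
 * THError branch like the one just below, so a missing LAPACK is a
 * hard runtime failure rather than silent misbehaviour. When LAPACK
 * is present, callers are expected to decode the standard info
 * contract themselves; a hedged sketch for getrf (editor's
 * illustration, not code from this file):
 *
 *   int info;
 *   THLapack_(getrf)(m, n, a, lda, ipiv, &info);
 *   if (info < 0)
 *     THError("argument %d to getrf was invalid", -info);
 *   else if (info > 0)
 *     THError("U(%d,%d) is exactly zero, the matrix is singular", info, info);
 */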
- THError("ormqr: Lapack library not found in compile time\n"); -#endif -} - - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THLapack.h b/contrib/lua-torch/torch7/lib/TH/generic/THLapack.h deleted file mode 100644 index b464dd2d2b..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THLapack.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THLapack.h" -#else - -/* AX=B */ -TH_API void THLapack_(gesv)(int n, int nrhs, real *a, int lda, int *ipiv, real *b, int ldb, int* info); -/* Solve a triangular system of the form A * X = B or A^T * X = B */ -TH_API void THLapack_(trtrs)(char uplo, char trans, char diag, int n, int nrhs, real *a, int lda, real *b, int ldb, int* info); -/* ||AX-B|| */ -TH_API void THLapack_(gels)(char trans, int m, int n, int nrhs, real *a, int lda, real *b, int ldb, real *work, int lwork, int *info); -/* Eigenvals */ -TH_API void THLapack_(syev)(char jobz, char uplo, int n, real *a, int lda, real *w, real *work, int lwork, int *info); -/* Non-sym eigenvals */ -TH_API void THLapack_(geev)(char jobvl, char jobvr, int n, real *a, int lda, real *wr, real *wi, real* vl, int ldvl, real *vr, int ldvr, real *work, int lwork, int *info); -/* svd */ -TH_API void THLapack_(gesvd)(char jobu, char jobvt, int m, int n, real *a, int lda, real *s, real *u, int ldu, real *vt, int ldvt, real *work, int lwork, int *info); -/* LU decomposition */ -TH_API void THLapack_(getrf)(int m, int n, real *a, int lda, int *ipiv, int *info); -TH_API void THLapack_(getrs)(char trans, int n, int nrhs, real *a, int lda, int *ipiv, real *b, int ldb, int *info); -/* Matrix Inverse */ -TH_API void THLapack_(getri)(int n, real *a, int lda, int *ipiv, real *work, int lwork, int* info); - -/* Positive Definite matrices */ -/* Cholesky factorization */ -void THLapack_(potrf)(char uplo, int n, real *a, int lda, int *info); -/* Matrix inverse based on Cholesky factorization */ -void THLapack_(potri)(char uplo, int n, real *a, int lda, int *info); -/* Solve A*X = B with a symmetric positive definite matrix A using the Cholesky factorization */ -void THLapack_(potrs)(char uplo, int n, int nrhs, real *a, int lda, real *b, int ldb, int *info); -/* Cholesky factorization with complete pivoting. 
*/ -void THLapack_(pstrf)(char uplo, int n, real *a, int lda, int *piv, int *rank, real tol, real *work, int *info); - -/* QR decomposition */ -void THLapack_(geqrf)(int m, int n, real *a, int lda, real *tau, real *work, int lwork, int *info); -/* Build Q from output of geqrf */ -void THLapack_(orgqr)(int m, int n, int k, real *a, int lda, real *tau, real *work, int lwork, int *info); -/* Multiply Q with a matrix from output of geqrf */ -void THLapack_(ormqr)(char side, char trans, int m, int n, int k, real *a, int lda, real *tau, real *c, int ldc, real *work, int lwork, int *info); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THStorage.c b/contrib/lua-torch/torch7/lib/TH/generic/THStorage.c deleted file mode 100644 index a592cfb624..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THStorage.c +++ /dev/null @@ -1,226 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THStorage.c" -#else - -real* THStorage_(data)(const THStorage *self) -{ - return self->data; -} - -ptrdiff_t THStorage_(size)(const THStorage *self) -{ - return self->size; -} - -size_t THStorage_(elementSize)() -{ - return sizeof(real); -} - -THStorage* THStorage_(new)(void) -{ - return THStorage_(newWithSize)(0); -} - -THStorage* THStorage_(newWithSize)(ptrdiff_t size) -{ - return THStorage_(newWithAllocator)(size, &THDefaultAllocator, NULL); -} - -THStorage* THStorage_(newWithAllocator)(ptrdiff_t size, - THAllocator *allocator, - void *allocatorContext) -{ - THStorage *storage = THAlloc(sizeof(THStorage)); - storage->data = allocator->malloc(allocatorContext, sizeof(real)*size); - storage->size = size; - storage->refcount = 1; - storage->flag = TH_STORAGE_REFCOUNTED | TH_STORAGE_RESIZABLE | TH_STORAGE_FREEMEM; - storage->allocator = allocator; - storage->allocatorContext = allocatorContext; - return storage; -} - -THStorage* THStorage_(newWithMapping)(const char *filename, ptrdiff_t size, int flags) -{ - THMapAllocatorContext *ctx = THMapAllocatorContext_new(filename, flags); - - THStorage *storage = THStorage_(newWithAllocator)(size, - &THMapAllocator, - ctx); - - if(size <= 0) - storage->size = THMapAllocatorContext_size(ctx)/sizeof(real); - - THStorage_(clearFlag)(storage, TH_STORAGE_RESIZABLE); - - return storage; -} - -THStorage* THStorage_(newWithSize1)(real data0) -{ - THStorage *self = THStorage_(newWithSize)(1); - self->data[0] = data0; - return self; -} - -THStorage* THStorage_(newWithSize2)(real data0, real data1) -{ - THStorage *self = THStorage_(newWithSize)(2); - self->data[0] = data0; - self->data[1] = data1; - return self; -} - -THStorage* THStorage_(newWithSize3)(real data0, real data1, real data2) -{ - THStorage *self = THStorage_(newWithSize)(3); - self->data[0] = data0; - self->data[1] = data1; - self->data[2] = data2; - return self; -} - -THStorage* THStorage_(newWithSize4)(real data0, real data1, real data2, real data3) -{ - THStorage *self = THStorage_(newWithSize)(4); - self->data[0] = data0; - self->data[1] = data1; - self->data[2] = data2; - self->data[3] = data3; - return self; -} - -void THStorage_(setFlag)(THStorage *storage, const char flag) -{ - storage->flag |= flag; -} - -void THStorage_(clearFlag)(THStorage *storage, const char flag) -{ - storage->flag &= ~flag; -} - -void THStorage_(retain)(THStorage *storage) -{ - if(storage && (storage->flag & TH_STORAGE_REFCOUNTED)) - THAtomicIncrementRef(&storage->refcount); -} - -void THStorage_(free)(THStorage *storage) -{ - if(!storage) - return; - - if((storage->flag & TH_STORAGE_REFCOUNTED) && 
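/*
 * [editor's note -- not part of the original torch7 sources] Storage
 * lifetime is plain atomic reference counting: the constructors above
 * start refcount at 1, retain increments it, and free releases the
 * buffer only when the count reaches zero and the storage owns its
 * memory (TH_STORAGE_FREEMEM). Hedged sketch of the expected call
 * pattern, using the Float instantiation of this generic file:
 *
 *   THFloatStorage *s = THFloatStorage_newWithSize(16); // refcount == 1
 *   THFloatStorage_retain(s);                           // refcount == 2
 *   THFloatStorage_free(s);                             // refcount == 1
 *   THFloatStorage_free(s);                             // buffer released
 */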
(THAtomicGet(&storage->refcount) > 0)) - { - if(THAtomicDecrementRef(&storage->refcount)) - { - if(storage->flag & TH_STORAGE_FREEMEM) { - storage->allocator->free(storage->allocatorContext, storage->data); - } - if(storage->flag & TH_STORAGE_VIEW) { - THStorage_(free)(storage->view); - } - THFree(storage); - } - } -} - -THStorage* THStorage_(newWithData)(real *data, ptrdiff_t size) -{ - return THStorage_(newWithDataAndAllocator)(data, size, - &THDefaultAllocator, NULL); -} - -THStorage* THStorage_(newWithDataAndAllocator)(real* data, ptrdiff_t size, - THAllocator* allocator, - void* allocatorContext) { - THStorage *storage = THAlloc(sizeof(THStorage)); - storage->data = data; - storage->size = size; - storage->refcount = 1; - storage->flag = TH_STORAGE_REFCOUNTED | TH_STORAGE_RESIZABLE | TH_STORAGE_FREEMEM; - storage->allocator = allocator; - storage->allocatorContext = allocatorContext; - return storage; -} - -void THStorage_(resize)(THStorage *storage, ptrdiff_t size) -{ - if(storage->flag & TH_STORAGE_RESIZABLE) - { - if(storage->allocator->realloc == NULL) { - /* case when the allocator does not have a realloc defined */ - real *old_data = storage->data; - ptrdiff_t old_size = storage->size; - if (size == 0) { - storage->data = NULL; - } else { - storage->data = storage->allocator->malloc( - storage->allocatorContext, - sizeof(real)*size); - } - storage->size = size; - if (old_data != NULL) { - ptrdiff_t copy_size = old_size; - if (storage->size < copy_size) { - copy_size = storage->size; - } - if (copy_size > 0) { - memcpy(storage->data, old_data, sizeof(real)*copy_size); - } - storage->allocator->free(storage->allocatorContext, old_data); - } - } else { - storage->data = storage->allocator->realloc( - storage->allocatorContext, - storage->data, - sizeof(real)*size); - storage->size = size; - } - } else { - THError("Trying to resize storage that is not resizable"); - } -} - -void THStorage_(fill)(THStorage *storage, real value) -{ - ptrdiff_t i; - for(i = 0; i < storage->size; i++) - storage->data[i] = value; -} - -void THStorage_(set)(THStorage *self, ptrdiff_t idx, real value) -{ - THArgCheck((idx >= 0) && (idx < self->size), 2, "out of bounds"); - self->data[idx] = value; -} - -real THStorage_(get)(const THStorage *self, ptrdiff_t idx) -{ - THArgCheck((idx >= 0) && (idx < self->size), 2, "out of bounds"); - return self->data[idx]; -} - -void THStorage_(swap)(THStorage *storage1, THStorage *storage2) -{ -#define SWAP(val) { val = storage1->val; storage1->val = storage2->val; storage2->val = val; } - real *data; - ptrdiff_t size; - char flag; - THAllocator *allocator; - void *allocatorContext; - struct THStorage *view; - - SWAP(data); - SWAP(size); - SWAP(flag); - // don't swap refcount! - SWAP(allocator); - SWAP(allocatorContext); - SWAP(view); -#undef SWAP -} - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THStorage.h b/contrib/lua-torch/torch7/lib/TH/generic/THStorage.h deleted file mode 100644 index 3dd214b339..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THStorage.h +++ /dev/null @@ -1,71 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THStorage.h" -#else - -/* we could have a linked list - that initializes the math, lab structures (or more). - meh -- complicated. - - Problem: THMapStorage is kind of a class - THLab_()... how do I get out of that? - - with templates, I would have to instantiate them all!!! oh boy! - And how do I know it is for Cuda? - The float type is the same inside the <> - - in the end, it would come down to float/double pointers... etc... = easy. - primitives?? - */ - -#define TH_STORAGE_REFCOUNTED 1 -#define TH_STORAGE_RESIZABLE 2 -#define TH_STORAGE_FREEMEM 4 -#define TH_STORAGE_VIEW 8 - -typedef struct THStorage -{ - real *data; - ptrdiff_t size; - int refcount; - char flag; - THAllocator *allocator; - void *allocatorContext; - struct THStorage *view; -} THStorage; - -TH_API real* THStorage_(data)(const THStorage*); -TH_API ptrdiff_t THStorage_(size)(const THStorage*); -TH_API size_t THStorage_(elementSize)(void); - -/* slow access -- checks everything */ -TH_API void THStorage_(set)(THStorage*, ptrdiff_t, real); -TH_API real THStorage_(get)(const THStorage*, ptrdiff_t); - -TH_API THStorage* THStorage_(new)(void); -TH_API THStorage* THStorage_(newWithSize)(ptrdiff_t size); -TH_API THStorage* THStorage_(newWithSize1)(real); -TH_API THStorage* THStorage_(newWithSize2)(real, real); -TH_API THStorage* THStorage_(newWithSize3)(real, real, real); -TH_API THStorage* THStorage_(newWithSize4)(real, real, real, real); -TH_API THStorage* THStorage_(newWithMapping)(const char *filename, ptrdiff_t size, int flags); - -/* takes ownership of data */ -TH_API THStorage* THStorage_(newWithData)(real *data, ptrdiff_t size); - -TH_API THStorage* THStorage_(newWithAllocator)(ptrdiff_t size, - THAllocator* allocator, - void *allocatorContext); -TH_API THStorage* THStorage_(newWithDataAndAllocator)( - real* data, ptrdiff_t size, THAllocator* allocator, void *allocatorContext); - -/* these should not differ across APIs */ -TH_API void THStorage_(setFlag)(THStorage *storage, const char flag); -TH_API void THStorage_(clearFlag)(THStorage *storage, const char flag); -TH_API void THStorage_(retain)(THStorage *storage); -TH_API void THStorage_(swap)(THStorage *storage1, THStorage *storage2); - -/* these might differ for other APIs (like CUDA) */ -TH_API void THStorage_(free)(THStorage *storage); -TH_API void THStorage_(resize)(THStorage *storage, ptrdiff_t size); -TH_API void THStorage_(fill)(THStorage *storage, real value); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THStorageCopy.c b/contrib/lua-torch/torch7/lib/TH/generic/THStorageCopy.c deleted file mode 100644 index ce4b57eaff..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THStorageCopy.c +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THStorageCopy.c" -#else - -void THStorage_(rawCopy)(THStorage *storage, real *src) -{ - ptrdiff_t i; - for(i = 0; i < storage->size; i++) - storage->data[i] = src[i]; -} - -void THStorage_(copy)(THStorage *storage, THStorage *src) -{ - THArgCheck(storage->size == src->size, 2, "size mismatch"); - THStorage_(rawCopy)(storage, src->data); -} - -#define IMPLEMENT_THStorage_COPY(TYPENAMESRC) \ -void THStorage_(copy##TYPENAMESRC)(THStorage *storage, TH##TYPENAMESRC##Storage *src) \ -{ \ - ptrdiff_t i; \ - for(i = 0; i < storage->size; i++) \ - storage->data[i] = (real)src->data[i]; \ -} - -#define IMPLEMENT_THStorage_COPY_FROM_HALF(TYPENAMESRC) \ -void THStorage_(copy##TYPENAMESRC)(THStorage *storage, TH##TYPENAMESRC##Storage *src) \ -{ \ - THArgCheck(storage->size == src->size, 2, "size mismatch"); \ - ptrdiff_t i; \ - for(i = 0; i < storage->size; i++) \ - storage->data[i] = (real)TH_half2float(src->data[i]); \ -} - -#define IMPLEMENT_THStorage_COPY_TO_HALF(TYPENAMESRC) \ -void THStorage_(copy##TYPENAMESRC)(THStorage *storage, TH##TYPENAMESRC##Storage *src) \ -{ \ - THArgCheck(storage->size ==
src->size, 2, "size mismatch"); \ - ptrdiff_t i; \ - for(i = 0; i < storage->size; i++) \ - storage->data[i] = TH_float2half((float)(src->data[i])); \ -} - -#define IMPLEMENT_THStorage_COPY_TO_FROM_HALF(TYPENAMESRC) \ -void THStorage_(copy##TYPENAMESRC)(THStorage *storage, TH##TYPENAMESRC##Storage *src) \ -{ \ - THArgCheck(storage->size == src->size, 2, "size mismatch"); \ - ptrdiff_t i; \ - for(i = 0; i < storage->size; i++) \ - storage->data[i] = src->data[i]; \ -} - -#ifndef TH_REAL_IS_HALF -IMPLEMENT_THStorage_COPY(Byte) -IMPLEMENT_THStorage_COPY(Char) -IMPLEMENT_THStorage_COPY(Short) -IMPLEMENT_THStorage_COPY(Int) -IMPLEMENT_THStorage_COPY(Long) -IMPLEMENT_THStorage_COPY(Float) -IMPLEMENT_THStorage_COPY(Double) -IMPLEMENT_THStorage_COPY_FROM_HALF(Half) -#else -/* only allow pass-through for Half */ -IMPLEMENT_THStorage_COPY_TO_FROM_HALF(Half) -IMPLEMENT_THStorage_COPY_TO_HALF(Byte) -IMPLEMENT_THStorage_COPY_TO_HALF(Char) -IMPLEMENT_THStorage_COPY_TO_HALF(Short) -IMPLEMENT_THStorage_COPY_TO_HALF(Int) -IMPLEMENT_THStorage_COPY_TO_HALF(Long) -IMPLEMENT_THStorage_COPY_TO_HALF(Float) -IMPLEMENT_THStorage_COPY_TO_HALF(Double) -#endif - - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THStorageCopy.h b/contrib/lua-torch/torch7/lib/TH/generic/THStorageCopy.h deleted file mode 100644 index ce8a2a690d..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THStorageCopy.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THStorageCopy.h" -#else - -/* Support for copy between different Storage types */ - -TH_API void THStorage_(rawCopy)(THStorage *storage, real *src); -TH_API void THStorage_(copy)(THStorage *storage, THStorage *src); -TH_API void THStorage_(copyByte)(THStorage *storage, struct THByteStorage *src); -TH_API void THStorage_(copyChar)(THStorage *storage, struct THCharStorage *src); -TH_API void THStorage_(copyShort)(THStorage *storage, struct THShortStorage *src); -TH_API void THStorage_(copyInt)(THStorage *storage, struct THIntStorage *src); -TH_API void THStorage_(copyLong)(THStorage *storage, struct THLongStorage *src); -TH_API void THStorage_(copyFloat)(THStorage *storage, struct THFloatStorage *src); -TH_API void THStorage_(copyDouble)(THStorage *storage, struct THDoubleStorage *src); -TH_API void THStorage_(copyHalf)(THStorage *storage, struct THHalfStorage *src); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensor.c b/contrib/lua-torch/torch7/lib/TH/generic/THTensor.c deleted file mode 100644 index e44e06ec3c..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THTensor.c +++ /dev/null @@ -1,939 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THTensor.c" -#else - -/**** access methods ****/ -THStorage *THTensor_(storage)(const THTensor *self) -{ - return self->storage; -} - -ptrdiff_t THTensor_(storageOffset)(const THTensor *self) -{ - return self->storageOffset; -} - -int THTensor_(nDimension)(const THTensor *self) -{ - return self->nDimension; -} - -long THTensor_(size)(const THTensor *self, int dim) -{ - THArgCheck((dim >= 0) && (dim < self->nDimension), 2, "dimension %d out of range of %dD tensor", - dim+TH_INDEX_BASE, THTensor_(nDimension)(self)); - return self->size[dim]; -} - -long THTensor_(stride)(const THTensor *self, int dim) -{ - THArgCheck((dim >= 0) && (dim < self->nDimension), 2, "dimension %d out of range of %dD tensor", - dim+TH_INDEX_BASE, THTensor_(nDimension)(self)); - return self->stride[dim]; -} - -THLongStorage *THTensor_(newSizeOf)(THTensor *self) -{ - 
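/*
 * [editor's note -- not part of the original torch7 sources] A
 * THTensor owns no elements itself; it is a view (storage pointer,
 * offset, sizes, strides) over a refcounted THStorage, so
 * newWithTensor (below) aliases its argument while newClone makes an
 * independent copy. Hedged sketch using the Float instantiation of
 * this generic file:
 *
 *   THFloatTensor *a = THFloatTensor_newWithSize2d(2, 3);
 *   THFloatTensor *b = THFloatTensor_newWithTensor(a); // shares storage
 *   THFloatTensor *c = THFloatTensor_newClone(a);      // private copy
 *   THFloatTensor_set2d(a, 0, 0, 42.0f);
 *   // get2d(b, 0, 0) now reads 42; c is unaffected by the write to a
 *   THFloatTensor_free(c);
 *   THFloatTensor_free(b);
 *   THFloatTensor_free(a);
 */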
THLongStorage *size = THLongStorage_newWithSize(self->nDimension); - THLongStorage_rawCopy(size, self->size); - return size; -} - -THLongStorage *THTensor_(newStrideOf)(THTensor *self) -{ - THLongStorage *stride = THLongStorage_newWithSize(self->nDimension); - THLongStorage_rawCopy(stride, self->stride); - return stride; -} - -real *THTensor_(data)(const THTensor *self) -{ - if(self->storage) - return (self->storage->data+self->storageOffset); - else - return NULL; -} - -void THTensor_(setFlag)(THTensor *self, const char flag) -{ - self->flag |= flag; -} - -void THTensor_(clearFlag)(THTensor *self, const char flag) -{ - self->flag &= ~flag; -} - -/**** creation methods ****/ - -static void THTensor_(rawInit)(THTensor *self); - - -/* Empty init */ -THTensor *THTensor_(new)(void) -{ - THTensor *self = THAlloc(sizeof(THTensor)); - THTensor_(rawInit)(self); - return self; -} - -/* Pointer-copy init */ -THTensor *THTensor_(newWithTensor)(THTensor *tensor) -{ - THTensor *self = THAlloc(sizeof(THTensor)); - THTensor_(rawInit)(self); - THTensor_(setStorageNd)(self, - tensor->storage, - tensor->storageOffset, - tensor->nDimension, - tensor->size, - tensor->stride); - return self; -} - -/* Storage init */ -THTensor *THTensor_(newWithStorage)(THStorage *storage, ptrdiff_t storageOffset, THLongStorage *size, THLongStorage *stride) -{ - THTensor *self = THAlloc(sizeof(THTensor)); - if(size && stride) - THArgCheck(size->size == stride->size, 4, "inconsistent size"); - - THTensor_(rawInit)(self); -#ifdef DEBUG - THAssert((size ? size->size : (stride ? stride->size : 0)) <= INT_MAX); -#endif - THTensor_(setStorageNd)(self, - storage, - storageOffset, - (size ? size->size : (stride ? stride->size : 0)), - (size ? size->data : NULL), - (stride ? stride->data : NULL)); - - return self; -} -THTensor *THTensor_(newWithStorage1d)(THStorage *storage, ptrdiff_t storageOffset, - long size0, long stride0) -{ - return THTensor_(newWithStorage4d)(storage, storageOffset, size0, stride0, -1, -1, -1, -1, -1, -1); -} - -THTensor *THTensor_(newWithStorage2d)(THStorage *storage, ptrdiff_t storageOffset, - long size0, long stride0, - long size1, long stride1) -{ - return THTensor_(newWithStorage4d)(storage, storageOffset, size0, stride0, size1, stride1, -1, -1, -1, -1); -} - -THTensor *THTensor_(newWithStorage3d)(THStorage *storage, ptrdiff_t storageOffset, - long size0, long stride0, - long size1, long stride1, - long size2, long stride2) -{ - return THTensor_(newWithStorage4d)(storage, storageOffset, size0, stride0, size1, stride1, size2, stride2, -1, -1); -} - -THTensor *THTensor_(newWithStorage4d)(THStorage *storage, ptrdiff_t storageOffset, - long size0, long stride0, - long size1, long stride1, - long size2, long stride2, - long size3, long stride3) -{ - long size[4] = {size0, size1, size2, size3}; - long stride[4] = {stride0, stride1, stride2, stride3}; - - THTensor *self = THAlloc(sizeof(THTensor)); - THTensor_(rawInit)(self); - THTensor_(setStorageNd)(self, storage, storageOffset, 4, size, stride); - - return self; -} - -THTensor *THTensor_(newWithSize)(THLongStorage *size, THLongStorage *stride) -{ - return THTensor_(newWithStorage)(NULL, 0, size, stride); -} - -THTensor *THTensor_(newWithSize1d)(long size0) -{ - return THTensor_(newWithSize4d)(size0, -1, -1, -1); -} - -THTensor *THTensor_(newWithSize2d)(long size0, long size1) -{ - return THTensor_(newWithSize4d)(size0, size1, -1, -1); -} - -THTensor *THTensor_(newWithSize3d)(long size0, long size1, long size2) -{ - return THTensor_(newWithSize4d)(size0, size1, 
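/*
 * [editor's note -- not part of the original torch7 sources] All the
 * fixed-arity size constructors funnel into the 4d variant, padding
 * unused trailing dimensions with -1; resizeNd (later in this file)
 * stops counting dimensions at the first non-positive size, so the -1
 * entries just mean "fewer than four dimensions". Sketch of the
 * effective logic:
 *
 *   long size[4] = {5, -1, -1, -1}; // what newWithSize1d(5) passes down
 *   int nDim = 0;
 *   while (nDim < 4 && size[nDim] > 0)
 *     nDim++;                       // nDim == 1: a 1-d tensor of size 5
 */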
size2, -1); -} - -THTensor *THTensor_(newWithSize4d)(long size0, long size1, long size2, long size3) -{ - long size[4] = {size0, size1, size2, size3}; - - THTensor *self = THAlloc(sizeof(THTensor)); - THTensor_(rawInit)(self); - THTensor_(resizeNd)(self, 4, size, NULL); - - return self; -} - -THTensor *THTensor_(newClone)(THTensor *self) -{ - THTensor *tensor = THTensor_(new)(); - THTensor_(resizeAs)(tensor, self); - THTensor_(copy)(tensor, self); - return tensor; -} - -THTensor *THTensor_(newContiguous)(THTensor *self) -{ - if(!THTensor_(isContiguous)(self)) - return THTensor_(newClone)(self); - else - { - THTensor_(retain)(self); - return self; - } -} - -THTensor *THTensor_(newSelect)(THTensor *tensor, int dimension_, long sliceIndex_) -{ - THTensor *self = THTensor_(newWithTensor)(tensor); - THTensor_(select)(self, NULL, dimension_, sliceIndex_); - return self; -} - -THTensor *THTensor_(newNarrow)(THTensor *tensor, int dimension_, long firstIndex_, long size_) -{ - THTensor *self = THTensor_(newWithTensor)(tensor); - THTensor_(narrow)(self, NULL, dimension_, firstIndex_, size_); - return self; -} - -THTensor *THTensor_(newTranspose)(THTensor *tensor, int dimension1_, int dimension2_) -{ - THTensor *self = THTensor_(newWithTensor)(tensor); - THTensor_(transpose)(self, NULL, dimension1_, dimension2_); - return self; -} - -THTensor *THTensor_(newUnfold)(THTensor *tensor, int dimension_, long size_, long step_) -{ - THTensor *self = THTensor_(newWithTensor)(tensor); - THTensor_(unfold)(self, NULL, dimension_, size_, step_); - return self; -} - -THTensor *THTensor_(newView)(THTensor *tensor, THLongStorage *size) -{ - THArgCheck(THTensor_(isContiguous)(tensor), 1, "input is not contiguous"); - ptrdiff_t numel = THTensor_(nElement)(tensor); - THTensor *self = THTensor_(new)(); - THLongStorage *inferred_size = THLongStorage_newInferSize(size, numel); - THTensor_(setStorage)(self, tensor->storage, tensor->storageOffset, inferred_size, NULL); - THLongStorage_free(inferred_size); - return self; -} - -/* Resize */ -void THTensor_(resize)(THTensor *self, THLongStorage *size, THLongStorage *stride) -{ - THArgCheck(size != NULL, 2, "invalid size"); - if(stride) - THArgCheck(stride->size == size->size, 3, "invalid stride"); - -#ifdef DEBUG - THAssert(size->size <= INT_MAX); -#endif - THTensor_(resizeNd)(self, size->size, size->data, (stride ? 
stride->data : NULL)); -} - -void THTensor_(resizeAs)(THTensor *self, THTensor *src) -{ - if(!THTensor_(isSameSizeAs)(self, src)) - THTensor_(resizeNd)(self, src->nDimension, src->size, NULL); -} - -void THTensor_(resize1d)(THTensor *tensor, long size0) -{ - THTensor_(resize4d)(tensor, size0, -1, -1, -1); -} - -void THTensor_(resize2d)(THTensor *tensor, long size0, long size1) -{ - THTensor_(resize4d)(tensor, size0, size1, -1, -1); -} - -void THTensor_(resize3d)(THTensor *tensor, long size0, long size1, long size2) -{ - THTensor_(resize4d)(tensor, size0, size1, size2, -1); -} - -void THTensor_(resize4d)(THTensor *self, long size0, long size1, long size2, long size3) -{ - long size[4] = {size0, size1, size2, size3}; - - THTensor_(resizeNd)(self, 4, size, NULL); -} - -void THTensor_(resize5d)(THTensor *self, long size0, long size1, long size2, long size3, long size4) -{ - long size[5] = {size0, size1, size2, size3, size4}; - - THTensor_(resizeNd)(self, 5, size, NULL); -} - -THTensor* THTensor_(newExpand)(THTensor *tensor, THLongStorage *sizes) { - THTensor *result = THTensor_(new)(); - THTensor_(expand)(result, tensor, sizes); - return result; -} - -void THTensor_(expand)(THTensor *r, THTensor *tensor, THLongStorage *sizes) { - THArgCheck(THTensor_(nDimension)(tensor) > 0, 0, "can't expand an empty tensor"); - THArgCheck(THLongStorage_size(sizes) >= THTensor_(nDimension)(tensor), 1, - "the number of sizes provided must be greater or equal to the " - "number of dimensions in the tensor"); - - long *expandedSizes; - long *expandedStrides; - char error_buffer[1024]; - int ret = - THLongStorage_inferExpandGeometry(tensor->size, tensor->stride, THTensor_(nDimension)(tensor), - sizes, &expandedSizes, &expandedStrides, error_buffer, 1024); - - if (ret != 0) { - THError(error_buffer); - return; - } - - THTensor_(setStorageNd)(r, THTensor_(storage)(tensor), THTensor_(storageOffset)(tensor), - THLongStorage_size(sizes), expandedSizes, expandedStrides); - THFree(expandedSizes); - THFree(expandedStrides); -} - - -void THTensor_(expandNd)(THTensor **rets, THTensor **ops, int count) { - for (int i = 0; i < count; ++i) { - THArgCheck(THTensor_(nDimension)(ops[i]) > 0, i, "can't expand empty tensor %d", i); - } - - long *op_sizes[count]; - long op_dims[count]; - - for (int i = 0; i < count; ++i) { - op_sizes[i] = ops[i]->size; - op_dims[i] = ops[i]->nDimension; - } - - THLongStorage *sizes = THLongStorage_new(); - char error_buffer[1024]; - int ret = THLongStorage_inferSizeN(sizes, - count, - op_sizes, - op_dims, - error_buffer, - 1024); - - if(ret != 0) { - THLongStorage_free(sizes); - THError(error_buffer); - return; - } - - for (int i = 0; i < count; ++i) { - THTensor_(expand)(rets[i], ops[i], sizes); - } - - THLongStorage_free(sizes); -} - -void THTensor_(set)(THTensor *self, THTensor *src) -{ - if(self != src) - THTensor_(setStorageNd)(self, - src->storage, - src->storageOffset, - src->nDimension, - src->size, - src->stride); -} - -void THTensor_(setStorage)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, THLongStorage *size_, THLongStorage *stride_) -{ - if(size_ && stride_) - THArgCheck(size_->size == stride_->size, 5, "inconsistent size/stride sizes"); - -#ifdef DEBUG - THAssert((size_ ? size_->size : (stride_ ? stride_->size : 0)) <= INT_MAX); -#endif - THTensor_(setStorageNd)(self, - storage_, - storageOffset_, - (size_ ? size_->size : (stride_ ? stride_->size : 0)), - (size_ ? size_->data : NULL), - (stride_ ? 
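/*
 * [editor's note -- not part of the original torch7 sources] expand
 * (above) implements broadcasting without copying:
 * THLongStorage_inferExpandGeometry assigns stride 0 to every
 * dimension that is grown from size 1, so all indices along that
 * dimension alias the same storage element. Hedged sketch of the
 * resulting geometry for a 3x1 tensor expanded to 3x4:
 *
 *   static long element_offset(long off, long i, long j)
 *   {
 *     const long stride0 = 1, stride1 = 0;  // strides after expand
 *     return off + i*stride0 + j*stride1;   // independent of j
 *   }
 */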
stride_->data : NULL)); -} - -void THTensor_(setStorage1d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_) -{ - THTensor_(setStorage4d)(self, storage_, storageOffset_, - size0_, stride0_, - -1, -1, - -1, -1, - -1, -1); -} - -void THTensor_(setStorage2d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_, - long size1_, long stride1_) -{ - THTensor_(setStorage4d)(self, storage_, storageOffset_, - size0_, stride0_, - size1_, stride1_, - -1, -1, - -1, -1); -} - -void THTensor_(setStorage3d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_, - long size1_, long stride1_, - long size2_, long stride2_) -{ - THTensor_(setStorage4d)(self, storage_, storageOffset_, - size0_, stride0_, - size1_, stride1_, - size2_, stride2_, - -1, -1); -} - -void THTensor_(setStorage4d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_, - long size1_, long stride1_, - long size2_, long stride2_, - long size3_, long stride3_) -{ - - long size[4] = {size0_, size1_, size2_, size3_}; - long stride[4] = {stride0_, stride1_, stride2_, stride3_}; - - THTensor_(setStorageNd)(self, storage_, storageOffset_, 4, size, stride); -} - - -void THTensor_(narrow)(THTensor *self, THTensor *src, int dimension, long firstIndex, long size) -{ - if(!src) - src = self; - - THArgCheck( (dimension >= 0) && (dimension < src->nDimension), 2, "out of range"); - THArgCheck( (firstIndex >= 0) && (firstIndex < src->size[dimension]), 3, "out of range"); - THArgCheck( (size > 0) && (firstIndex <= src->size[dimension] - size), 4, "out of range"); - - THTensor_(set)(self, src); - - if(firstIndex > 0) - self->storageOffset += firstIndex*self->stride[dimension]; - - self->size[dimension] = size; -} - -void THTensor_(select)(THTensor *self, THTensor *src, int dimension, long sliceIndex) -{ - int d; - - if(!src) - src = self; - - THArgCheck(src->nDimension > 1, 1, "cannot select on a vector"); - THArgCheck((dimension >= 0) && (dimension < src->nDimension), 2, "out of range"); - THArgCheck((sliceIndex >= 0) && (sliceIndex < src->size[dimension]), 3, "out of range"); - - THTensor_(set)(self, src); - THTensor_(narrow)(self, NULL, dimension, sliceIndex, 1); - for(d = dimension; d < self->nDimension-1; d++) - { - self->size[d] = self->size[d+1]; - self->stride[d] = self->stride[d+1]; - } - self->nDimension--; -} - -void THTensor_(transpose)(THTensor *self, THTensor *src, int dimension1, int dimension2) -{ - long z; - - if(!src) - src = self; - - THArgCheck( (dimension1 >= 0) && (dimension1 < src->nDimension), 1, "out of range"); - THArgCheck( (dimension2 >= 0) && (dimension2 < src->nDimension), 2, "out of range"); - - THTensor_(set)(self, src); - - if(dimension1 == dimension2) - return; - - z = self->stride[dimension1]; - self->stride[dimension1] = self->stride[dimension2]; - self->stride[dimension2] = z; - z = self->size[dimension1]; - self->size[dimension1] = self->size[dimension2]; - self->size[dimension2] = z; -} - -void THTensor_(unfold)(THTensor *self, THTensor *src, int dimension, long size, long step) -{ - long *newSize; - long *newStride; - int d; - - if(!src) - src = self; - - THArgCheck( (src->nDimension > 0), 1, "cannot unfold an empty tensor"); - THArgCheck((dimension >= 0) && (dimension < src->nDimension), 2, "out of range"); - THArgCheck(size <= src->size[dimension], 3, "out of range"); - THArgCheck(step > 0, 4, "invalid step"); - - THTensor_(set)(self, src); - - newSize = 
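/*
 * [editor's note -- not part of the original torch7 sources] Worked
 * example of the geometry computed just below: unfolding a 1-d tensor
 * of size 7 with size = 3, step = 2 appends an inner dimension that
 * reuses the original stride and shrinks the unfolded dimension to
 * (7 - 3)/2 + 1 = 3 with stride 2*original, so the resulting 3x3 view
 * reads the windows {0,1,2}, {2,3,4}, {4,5,6} without copying:
 *
 *   // view(i, k) maps to data[(i*2 + k) * original_stride]
 */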
THAlloc(sizeof(long)*(self->nDimension+1)); - newStride = THAlloc(sizeof(long)*(self->nDimension+1)); - - newSize[self->nDimension] = size; - newStride[self->nDimension] = self->stride[dimension]; - for(d = 0; d < self->nDimension; d++) - { - if(d == dimension) - { - newSize[d] = (self->size[d] - size) / step + 1; - newStride[d] = step*self->stride[d]; - } - else - { - newSize[d] = self->size[d]; - newStride[d] = self->stride[d]; - } - } - - THFree(self->size); - THFree(self->stride); - - self->size = newSize; - self->stride = newStride; - self->nDimension++; -} - -/* we have to handle the case where the result is a number */ -void THTensor_(squeeze)(THTensor *self, THTensor *src) -{ - int ndim = 0; - int d; - - if(!src) - src = self; - - THTensor_(set)(self, src); - - for(d = 0; d < src->nDimension; d++) - { - if(src->size[d] != 1) - { - if(d != ndim) - { - self->size[ndim] = src->size[d]; - self->stride[ndim] = src->stride[d]; - } - ndim++; - } - } - - /* right now, we do not handle 0-dimension tensors */ - if(ndim == 0 && src->nDimension > 0) - { - self->size[0] = 1; - self->stride[0] = 1; - ndim = 1; - } - self->nDimension = ndim; -} - -void THTensor_(squeeze1d)(THTensor *self, THTensor *src, int dimension) -{ - int d; - - if(!src) - src = self; - - THArgCheck((dimension >= 0) && (dimension < src->nDimension), 2, "dimension out of range"); - - THTensor_(set)(self, src); - - if(src->size[dimension] == 1 && src->nDimension > 1) - { - for(d = dimension; d < self->nDimension-1; d++) - { - self->size[d] = self->size[d+1]; - self->stride[d] = self->stride[d+1]; - } - self->nDimension--; - } -} - -void THTensor_(unsqueeze1d)(THTensor *self, THTensor *src, int dimension) -{ - int d; - - if(!src) - src = self; - - THArgCheck((dimension >= 0) && (dimension <= src->nDimension), 2, "dimension out of range"); - THArgCheck(src->nDimension > 0, 2, "cannot unsqueeze empty tensor"); - - THTensor_(set)(self, src); - - self->size = (long*)THRealloc(self->size, sizeof(long)*(self->nDimension+1)); - self->stride = (long*)THRealloc(self->stride, sizeof(long)*(self->nDimension+1)); - self->nDimension++; - for (d = self->nDimension-1; d > dimension; d--) { - self->size[d] = self->size[d-1]; - self->stride[d] = self->stride[d-1]; - } - if (dimension+1 < self->nDimension) { - self->stride[dimension] = self->size[dimension+1] * self->stride[dimension+1]; - } else { - self->stride[dimension] = 1; - } - self->size[dimension] = 1; -} - -int THTensor_(isTransposed)(const THTensor *self) -{ - if (THTensor_(isContiguous)(self)) { - return 0; - } - long max_stride = 1; - long size_max_stride = 1; - long z = 1; - int d; - for (d = 0; d < self->nDimension; ++d) { - if (self->stride[d] == 0 && self->size[d] != 1) - return 0; - if (self->stride[d] > max_stride) { - max_stride = self->stride[d]; - size_max_stride = self->size[d]; - } - z *= self->size[d]; - } - if (z == max_stride * size_max_stride) { - return 1; - } - return 0; -} - -int THTensor_(isContiguous)(const THTensor *self) -{ - long z = 1; - int d; - for(d = self->nDimension-1; d >= 0; d--) - { - if(self->size[d] != 1) - { - if(self->stride[d] == z) - z *= self->size[d]; - else - return 0; - } - } - return 1; -} - -int THTensor_(isSize)(const THTensor *self, const THLongStorage *dims) -{ - int d; - if (self->nDimension != dims->size) - return 0; - - for(d = 0; d < self->nDimension; ++d) - { - if(self->size[d] != dims->data[d]) - return 0; - } - return 1; -} - -int THTensor_(isSameSizeAs)(const THTensor *self, const THTensor* src) -{ - int d; - if 
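/*
 * [editor's note -- not part of the original torch7 sources]
 * isContiguous (above) walks dimensions from last to first and
 * requires each stride to equal the product of the sizes to its
 * right, i.e. row-major layout with no gaps. Hedged sketch with the
 * Float instantiation:
 *
 *   THFloatTensor *t = THFloatTensor_newWithSize2d(2, 3);    // strides {3, 1}
 *   THFloatTensor *tt = THFloatTensor_newTranspose(t, 0, 1); // strides {1, 3}
 *   // isContiguous(t) == 1, isContiguous(tt) == 0, and
 *   // newContiguous(tt) would materialize a packed copy
 */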
(self->nDimension != src->nDimension) - return 0; - for(d = 0; d < self->nDimension; ++d) - { - if(self->size[d] != src->size[d]) - return 0; - } - return 1; -} - -int THTensor_(isSetTo)(const THTensor *self, const THTensor* src) -{ - if (!self->storage) - return 0; - if (self->storage == src->storage && - self->storageOffset == src->storageOffset && - self->nDimension == src->nDimension) - { - int d; - for (d = 0; d < self->nDimension; ++d) - { - if (self->size[d] != src->size[d] || self->stride[d] != src->stride[d]) - return 0; - } - return 1; - } - return 0; -} - -ptrdiff_t THTensor_(nElement)(const THTensor *self) -{ - if(self->nDimension == 0) - return 0; - else - { - ptrdiff_t nElement = 1; - int d; - for(d = 0; d < self->nDimension; d++) - nElement *= self->size[d]; - return nElement; - } -} - -void THTensor_(retain)(THTensor *self) -{ - if(self->flag & TH_TENSOR_REFCOUNTED) - THAtomicIncrementRef(&self->refcount); -} - -void THTensor_(free)(THTensor *self) -{ - if(!self) - return; - - if(self->flag & TH_TENSOR_REFCOUNTED) - { - if(THAtomicDecrementRef(&self->refcount)) - { - THFree(self->size); - THFree(self->stride); - if(self->storage) - THStorage_(free)(self->storage); - THFree(self); - } - } -} - -void THTensor_(freeCopyTo)(THTensor *self, THTensor *dst) -{ - if(self != dst) - THTensor_(copy)(dst, self); - - THTensor_(free)(self); -} - -/*******************************************************************************/ - -static void THTensor_(rawInit)(THTensor *self) -{ - self->refcount = 1; - self->storage = NULL; - self->storageOffset = 0; - self->size = NULL; - self->stride = NULL; - self->nDimension = 0; - self->flag = TH_TENSOR_REFCOUNTED; -} - -void THTensor_(setStorageNd)(THTensor *self, THStorage *storage, ptrdiff_t storageOffset, int nDimension, long *size, long *stride) -{ - /* storage */ - if(self->storage != storage) - { - if(self->storage) - THStorage_(free)(self->storage); - - if(storage) - { - self->storage = storage; - THStorage_(retain)(self->storage); - } - else - self->storage = NULL; - } - - /* storageOffset */ - if(storageOffset < 0) - THError("Tensor: invalid storage offset"); - self->storageOffset = storageOffset; - - /* size and stride */ - THTensor_(resizeNd)(self, nDimension, size, stride); -} - -void THTensor_(resizeNd)(THTensor *self, int nDimension, long *size, long *stride) -{ - int d; - int nDimension_; - ptrdiff_t totalSize; - int hascorrectsize = 1; - - nDimension_ = 0; - for(d = 0; d < nDimension; d++) - { - if(size[d] > 0) - { - nDimension_++; - if((self->nDimension > d) && (size[d] != self->size[d])) - hascorrectsize = 0; - - if((self->nDimension > d) && stride && (stride[d] >= 0) && (stride[d] != self->stride[d])) - hascorrectsize = 0; - } - else - break; - } - nDimension = nDimension_; - - if(nDimension != self->nDimension) - hascorrectsize = 0; - - if(hascorrectsize) - return; - - if(nDimension > 0) - { - if(nDimension != self->nDimension) - { - self->size = THRealloc(self->size, sizeof(long)*nDimension); - self->stride = THRealloc(self->stride, sizeof(long)*nDimension); - self->nDimension = nDimension; - } - - totalSize = 1; - for(d = self->nDimension-1; d >= 0; d--) - { - self->size[d] = size[d]; - if(stride && (stride[d] >= 0) ) - self->stride[d] = stride[d]; - else - { - if(d == self->nDimension-1) - self->stride[d] = 1; - else - self->stride[d] = self->size[d+1]*self->stride[d+1]; - } - totalSize += (self->size[d]-1)*self->stride[d]; - } - - if(totalSize+self->storageOffset > 0) - { - if(!self->storage) - self->storage = 
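/*
 * [editor's note -- not part of the original torch7 sources] The
 * capacity check here sizes the backing storage by the largest
 * reachable flat index, storageOffset + 1 + sum_d (size[d]-1)*stride[d],
 * not by the element count. Sketch for a dense 2x3x4 tensor with the
 * default strides {12, 4, 1}:
 *
 *   long size[3] = {2, 3, 4}, stride[3] = {12, 4, 1};
 *   ptrdiff_t need = 1;
 *   for (int d = 0; d < 3; d++)
 *     need += (size[d] - 1) * stride[d];  // need == 24, the dense count
 */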
THStorage_(new)(); - if(totalSize+self->storageOffset > self->storage->size) - THStorage_(resize)(self->storage, totalSize+self->storageOffset); - } - } - else - self->nDimension = 0; -} - -void THTensor_(set1d)(THTensor *tensor, long x0, real value) -{ - THArgCheck(tensor->nDimension == 1, 1, "tensor must have one dimension"); - THArgCheck( (x0 >= 0) && (x0 < tensor->size[0]), 2, "out of range"); - THStorage_(set)(tensor->storage, tensor->storageOffset+x0*tensor->stride[0], value); -} - -real THTensor_(get1d)(const THTensor *tensor, long x0) -{ - THArgCheck(tensor->nDimension == 1, 1, "tensor must have one dimension"); - THArgCheck( (x0 >= 0) && (x0 < tensor->size[0]), 2, "out of range"); - return THStorage_(get)(tensor->storage, tensor->storageOffset+x0*tensor->stride[0]); -} - -void THTensor_(set2d)(THTensor *tensor, long x0, long x1, real value) -{ - THArgCheck(tensor->nDimension == 2, 1, "tensor must have two dimensions"); - THArgCheck((x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]), 2, "out of range"); - THStorage_(set)(tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1], value); -} - -real THTensor_(get2d)(const THTensor *tensor, long x0, long x1) -{ - THArgCheck(tensor->nDimension == 2, 1, "tensor must have two dimensions"); - THArgCheck((x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]), 2, "out of range"); - return THStorage_(get)(tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]); -} - -void THTensor_(set3d)(THTensor *tensor, long x0, long x1, long x2, real value) -{ - THArgCheck(tensor->nDimension == 3, 1, "tensor must have three dimensions"); - THArgCheck( (x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]) && (x2 >= 0) && (x2 < tensor->size[2]), 2, "out of range"); - THStorage_(set)(tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2], value); -} - -real THTensor_(get3d)(const THTensor *tensor, long x0, long x1, long x2) -{ - THArgCheck(tensor->nDimension == 3, 1, "tensor must have three dimensions"); - THArgCheck( (x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]) && (x2 >= 0) && (x2 < tensor->size[2]), 2, "out of range"); - return THStorage_(get)(tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2]); -} - -void THTensor_(set4d)(THTensor *tensor, long x0, long x1, long x2, long x3, real value) -{ - THArgCheck(tensor->nDimension == 4, 1, "tensor must have four dimensions"); - THArgCheck((x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]) && (x2 >= 0) && (x2 < tensor->size[2]) && (x3 >= 0) && (x3 < tensor->size[3]), 2, "out of range"); - THStorage_(set)(tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2]+x3*tensor->stride[3], value); -} - -real THTensor_(get4d)(const THTensor *tensor, long x0, long x1, long x2, long x3) -{ - THArgCheck(tensor->nDimension == 4, 1, "tensor must have four dimensions"); - THArgCheck((x0 >= 0) && (x0 < tensor->size[0]) && (x1 >= 0) && (x1 < tensor->size[1]) && (x2 >= 0) && (x2 < tensor->size[2]) && (x3 >= 0) && (x3 < tensor->size[3]), 2, "out of range"); - return THStorage_(get)(tensor->storage, tensor->storageOffset+x0*tensor->stride[0]+x1*tensor->stride[1]+x2*tensor->stride[2]+x3*tensor->stride[3]); -} - -THDescBuff THTensor_(desc)(const THTensor *tensor) { - const int L = TH_DESC_BUFF_LEN; - THDescBuff buf; - char *str = 
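/*
 * [editor's note -- not part of the original torch7 sources] The
 * fixed-arity accessors above spell out the addressing rule: element
 * (x0, x1, ...) lives at storageOffset + x0*stride[0] + x1*stride[1]
 * + ... in the underlying storage, with THArgCheck guarding every
 * coordinate. Hedged sketch of the equivalent unchecked access for
 * the 2-d Float case:
 *
 *   static float manual_get2d(const THFloatTensor *t, long x0, long x1)
 *   {
 *     return t->storage->data[t->storageOffset
 *                             + x0*t->stride[0]
 *                             + x1*t->stride[1]];
 *   }
 */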
buf.str; - int n = 0; -#define _stringify(x) #x - n += snprintf(str, L-n, "torch." _stringify(x) "Tensor of size "); -#undef _stringify - int i; - for(i = 0; i < tensor->nDimension; i++) { - if(n >= L) break; - n += snprintf(str+n, L-n, "%ld", tensor->size[i]); - if(i < tensor->nDimension-1) { - n += snprintf(str+n, L-n, "x"); - } - } - if(n >= L) { - snprintf(str+L-4, 4, "..."); - } - return buf; -} - -THDescBuff THTensor_(sizeDesc)(const THTensor *tensor) { - THLongStorage *size = THTensor_(newSizeOf)((THTensor*)tensor); - THDescBuff buf = THLongStorage_sizeDesc(size); - THLongStorage_free(size); - return buf; -} - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensor.h b/contrib/lua-torch/torch7/lib/TH/generic/THTensor.h deleted file mode 100644 index 9fb246c856..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THTensor.h +++ /dev/null @@ -1,138 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THTensor.h" -#else - -/* a la lua? dim, storageoffset, ... et les methodes ? */ - -#define TH_TENSOR_REFCOUNTED 1 - -typedef struct THTensor -{ - long *size; - long *stride; - int nDimension; - - THStorage *storage; - ptrdiff_t storageOffset; - int refcount; - - char flag; - -} THTensor; - - -/**** access methods ****/ -TH_API THStorage* THTensor_(storage)(const THTensor *self); -TH_API ptrdiff_t THTensor_(storageOffset)(const THTensor *self); -TH_API int THTensor_(nDimension)(const THTensor *self); -TH_API long THTensor_(size)(const THTensor *self, int dim); -TH_API long THTensor_(stride)(const THTensor *self, int dim); -TH_API THLongStorage *THTensor_(newSizeOf)(THTensor *self); -TH_API THLongStorage *THTensor_(newStrideOf)(THTensor *self); -TH_API real *THTensor_(data)(const THTensor *self); - -TH_API void THTensor_(setFlag)(THTensor *self, const char flag); -TH_API void THTensor_(clearFlag)(THTensor *self, const char flag); - - -/**** creation methods ****/ -TH_API THTensor *THTensor_(new)(void); -TH_API THTensor *THTensor_(newWithTensor)(THTensor *tensor); -/* stride might be NULL */ -TH_API THTensor *THTensor_(newWithStorage)(THStorage *storage_, ptrdiff_t storageOffset_, THLongStorage *size_, THLongStorage *stride_); -TH_API THTensor *THTensor_(newWithStorage1d)(THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_); -TH_API THTensor *THTensor_(newWithStorage2d)(THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_, - long size1_, long stride1_); -TH_API THTensor *THTensor_(newWithStorage3d)(THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_, - long size1_, long stride1_, - long size2_, long stride2_); -TH_API THTensor *THTensor_(newWithStorage4d)(THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_, - long size1_, long stride1_, - long size2_, long stride2_, - long size3_, long stride3_); - -/* stride might be NULL */ -TH_API THTensor *THTensor_(newWithSize)(THLongStorage *size_, THLongStorage *stride_); -TH_API THTensor *THTensor_(newWithSize1d)(long size0_); -TH_API THTensor *THTensor_(newWithSize2d)(long size0_, long size1_); -TH_API THTensor *THTensor_(newWithSize3d)(long size0_, long size1_, long size2_); -TH_API THTensor *THTensor_(newWithSize4d)(long size0_, long size1_, long size2_, long size3_); - -TH_API THTensor *THTensor_(newClone)(THTensor *self); -TH_API THTensor *THTensor_(newContiguous)(THTensor *tensor); -TH_API THTensor *THTensor_(newSelect)(THTensor *tensor, int dimension_, long sliceIndex_); -TH_API THTensor *THTensor_(newNarrow)(THTensor 
*tensor, int dimension_, long firstIndex_, long size_); -TH_API THTensor *THTensor_(newTranspose)(THTensor *tensor, int dimension1_, int dimension2_); -TH_API THTensor *THTensor_(newUnfold)(THTensor *tensor, int dimension_, long size_, long step_); -TH_API THTensor *THTensor_(newView)(THTensor *tensor, THLongStorage *size); -TH_API THTensor *THTensor_(newExpand)(THTensor *tensor, THLongStorage *size); - -TH_API void THTensor_(expand)(THTensor *r, THTensor *tensor, THLongStorage *size); -TH_API void THTensor_(expandNd)(THTensor **rets, THTensor **ops, int count); - -TH_API void THTensor_(resize)(THTensor *tensor, THLongStorage *size, THLongStorage *stride); -TH_API void THTensor_(resizeAs)(THTensor *tensor, THTensor *src); -TH_API void THTensor_(resizeNd)(THTensor *tensor, int nDimension, long *size, long *stride); -TH_API void THTensor_(resize1d)(THTensor *tensor, long size0_); -TH_API void THTensor_(resize2d)(THTensor *tensor, long size0_, long size1_); -TH_API void THTensor_(resize3d)(THTensor *tensor, long size0_, long size1_, long size2_); -TH_API void THTensor_(resize4d)(THTensor *tensor, long size0_, long size1_, long size2_, long size3_); -TH_API void THTensor_(resize5d)(THTensor *tensor, long size0_, long size1_, long size2_, long size3_, long size4_); - -TH_API void THTensor_(set)(THTensor *self, THTensor *src); -TH_API void THTensor_(setStorage)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, THLongStorage *size_, THLongStorage *stride_); -TH_API void THTensor_(setStorageNd)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, int nDimension, long *size, long *stride); -TH_API void THTensor_(setStorage1d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_); -TH_API void THTensor_(setStorage2d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_, - long size1_, long stride1_); -TH_API void THTensor_(setStorage3d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_, - long size1_, long stride1_, - long size2_, long stride2_); -TH_API void THTensor_(setStorage4d)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, - long size0_, long stride0_, - long size1_, long stride1_, - long size2_, long stride2_, - long size3_, long stride3_); - -TH_API void THTensor_(narrow)(THTensor *self, THTensor *src, int dimension_, long firstIndex_, long size_); -TH_API void THTensor_(select)(THTensor *self, THTensor *src, int dimension_, long sliceIndex_); -TH_API void THTensor_(transpose)(THTensor *self, THTensor *src, int dimension1_, int dimension2_); -TH_API void THTensor_(unfold)(THTensor *self, THTensor *src, int dimension_, long size_, long step_); - -TH_API void THTensor_(squeeze)(THTensor *self, THTensor *src); -TH_API void THTensor_(squeeze1d)(THTensor *self, THTensor *src, int dimension_); -TH_API void THTensor_(unsqueeze1d)(THTensor *self, THTensor *src, int dimension_); - -TH_API int THTensor_(isContiguous)(const THTensor *self); -TH_API int THTensor_(isSameSizeAs)(const THTensor *self, const THTensor *src); -TH_API int THTensor_(isSetTo)(const THTensor *self, const THTensor *src); -TH_API int THTensor_(isSize)(const THTensor *self, const THLongStorage *dims); -TH_API ptrdiff_t THTensor_(nElement)(const THTensor *self); - -TH_API void THTensor_(retain)(THTensor *self); -TH_API void THTensor_(free)(THTensor *self); -TH_API void THTensor_(freeCopyTo)(THTensor *self, THTensor *dst); - -/* Slow access methods [check everything] */ -TH_API 
void THTensor_(set1d)(THTensor *tensor, long x0, real value); -TH_API void THTensor_(set2d)(THTensor *tensor, long x0, long x1, real value); -TH_API void THTensor_(set3d)(THTensor *tensor, long x0, long x1, long x2, real value); -TH_API void THTensor_(set4d)(THTensor *tensor, long x0, long x1, long x2, long x3, real value); - -TH_API real THTensor_(get1d)(const THTensor *tensor, long x0); -TH_API real THTensor_(get2d)(const THTensor *tensor, long x0, long x1); -TH_API real THTensor_(get3d)(const THTensor *tensor, long x0, long x1, long x2); -TH_API real THTensor_(get4d)(const THTensor *tensor, long x0, long x1, long x2, long x3); - -/* Debug methods */ -TH_API THDescBuff THTensor_(desc)(const THTensor *tensor); -TH_API THDescBuff THTensor_(sizeDesc)(const THTensor *tensor); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensorConv.c b/contrib/lua-torch/torch7/lib/TH/generic/THTensorConv.c deleted file mode 100644 index 684ff9db5f..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THTensorConv.c +++ /dev/null @@ -1,1957 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THTensorConv.c" -#else - -/* - 2D Input, 2D kernel : convolve given image with the given kernel. -*/ -void THTensor_(validXCorr2Dptr)(real *r_, - real alpha, - real *t_, long ir, long ic, - real *k_, long kr, long kc, - long sr, long sc) -{ - long or = (ir - kr) / sr + 1; - long oc = (ic - kc) / sc + 1; - - long xx, yy, kx, ky; - - if ((sc != 1) || (oc < 4)) { - /* regular convolution */ - for(yy = 0; yy < or; yy++) { - for(xx = 0; xx < oc; xx++) { - /* Dot product in two dimensions... (between input image and the mask) */ - real *pi_ = t_ + yy*sr*ic + xx*sc; - real *pw_ = k_; - real sum = 0; - for(ky = 0; ky < kr; ky++) { - for(kx = 0; kx < kc; kx++) { - sum += pi_[kx]*pw_[kx]; - } - pi_ += ic; /* next input line */ - pw_ += kc; /* next mask line */ - } - /* Update output */ - *r_++ += alpha*sum; - } - } - - } else { - /* SSE-based convolution */ - for(yy = 0; yy < or; yy++) { - real *pi_ = t_ + yy*sr*ic; - real *pw_ = k_; - for (ky = 0; ky < kr; ky++) { - real *pis_ = pi_; - for (kx = 0; kx < kc; kx++) { - THVector_(cadd)(r_, r_, pis_, alpha*pw_[kx], oc); - pis_++; - } - pi_ += ic; /* next input line */ - pw_ += kc; /* next mask line */ - } - r_ += oc; - } - } -} - -/* - 2D Input, 2D kernel : convolve given image with the given kernel. -*/ -void THTensor_(validConv2Dptr)(real *r_, - real alpha, - real *t_, long ir, long ic, - real *k_, long kr, long kc, - long sr, long sc) -{ - long or = (ir - kr) / sr + 1; - long oc = (ic - kc) / sc + 1; - - long xx, yy, kx, ky; - - if ((sc != 1) || (oc < 4)) { - /* regular convolution */ - for(yy = 0; yy < or; yy++) { - for(xx = 0; xx < oc; xx++) { - /* Dot product in two dimensions... 
(between input image and the mask) */ - real *pi_ = t_ + yy*sr*ic + xx*sc; - real *pw_ = k_ + kr*kc - 1; - real sum = 0; - for(ky = 0; ky < kr; ky++) { - for(kx = 0; kx < kc; kx++) { - sum += pi_[kx]*pw_[-kx]; - } - pi_ += ic; /* next input line */ - pw_ -= kc; /* next mask line */ - } - /* Update output */ - *r_++ += alpha*sum; - } - } - - } else { - /* SSE-based convolution */ - for(yy = 0; yy < or; yy++) { - real *pw_ = k_ + kr*kc - 1; - real *pi_ = t_ + yy*sr*ic; - for (ky = 0; ky < kr; ky++) { - real *pis_ = pi_; - for (kx = 0; kx < kc; kx++) { - THVector_(cadd)(r_, r_, pis_, alpha*pw_[-kx], oc); - pis_++; - } - pi_ += ic; /* next input line */ - pw_ -= kc; /* next mask line */ - } - r_ += oc; - } - } -} - -/* - 2D Input, 2D kernel : convolve given image with the given kernel, full convolution. -*/ -void THTensor_(fullConv2Dptr)(real *r_, - real alpha, - real *t_, long ir, long ic, - real *k_, long kr, long kc, - long sr, long sc) -{ - long oc = (ic - 1) * sc + kc; - - long xx, yy, kx, ky; - - if ((sc != 1) || (ic < 4)) { - /* regular convolution */ - for(yy = 0; yy < ir; yy++) { - for(xx = 0; xx < ic; xx++) { - /* Outer product in two dimensions... (between input image and the mask) */ - real *po_ = r_ + yy*sr*oc + xx*sc; - real *pw_ = k_; - for(ky = 0; ky < kr; ky++) - { - real z = *t_ * alpha; - for(kx = 0; kx < kc; kx++) { - po_[kx] += z * pw_[kx]; - } - po_ += oc; /* next input line */ - pw_ += kc; /* next mask line */ - } - t_++; - } - } - - } else { - /* SSE-based convolution */ - for(yy = 0; yy < ir; yy++) { - real *po_ = r_ + yy*sr*oc; - real *pw_ = k_; - for (ky = 0; ky < kr; ky++) { - real *pos_ = po_; - for (kx = 0; kx < kc; kx++) { - THVector_(cadd)(pos_, pos_, t_, alpha*pw_[kx], ic); - pos_++; - } - po_ += oc; /* next input line */ - pw_ += kc; /* next mask line */ - } - t_ += ic; - } - } -} - -/* - 2D Input, 2D kernel : convolve given image with the given kernel, full convolution. -*/ -void THTensor_(fullXCorr2Dptr)(real *r_, - real alpha, - real *t_, long ir, long ic, - real *k_, long kr, long kc, - long sr, long sc) -{ - long oc = (ic - 1) * sc + kc; - - long xx, yy, kx, ky; - - if ((sc != 1) || (ic < 4)) { - /* regular convolution */ - for(yy = 0; yy < ir; yy++) { - for(xx = 0; xx < ic; xx++) { - /* Outer product in two dimensions... (between input image and the mask) */ - real *po_ = r_ + yy*sr*oc + xx*sc; - real *pw_ = k_ + kr*kc -1; - long kx, ky; - for(ky = 0; ky < kr; ky++) - { - real z = *t_ * alpha; - for(kx = 0; kx < kc; kx++) { - po_[kx] += z * pw_[-kx]; - } - po_ += oc; /* next input line */ - pw_ -= kc; /* next mask line */ - } - t_++; - } - } - - } else { - /* SSE-based convolution */ - for(yy = 0; yy < ir; yy++) { - real *po_ = r_ + yy*sr*oc; - real *pw_ = k_ + kr*kc -1; - for (ky = 0; ky < kr; ky++) { - real *pos_ = po_; - for (kx = 0; kx < kc; kx++) { - THVector_(cadd)(pos_, pos_, t_, pw_[-kx]*alpha, ic); - pos_++; - } - po_ += oc; /* next input line */ - pw_ -= kc; /* next mask line */ - } - t_ += ic; - } - } -} - -/* - 2D Input, 2D kernel : convolve given image with the given kernel, valid convolution. 
- for sr,sc=1 this is equivalent to validXCorr2Dptr, but otherwise it is useful for - calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 -*/ -void THTensor_(validXCorr2DRevptr)(real *r_, - real alpha, - real *t_, long ir, long ic, - real *k_, long kr, long kc, - long sr, long sc) -{ - long or = ir - (kr - 1) * sr; - long oc = ic - (kc - 1) * sc; - - long xx, yy, kx, ky; - - if ((sc != 1) || (kc < 4)) { - /* regular convolution */ - for(yy = 0; yy < kr; yy++) { - for(xx = 0; xx < kc; xx++) { - real *po_ = r_; - real *pi_ = t_ + yy*sr*ic + xx*sc; - real z = *k_++ * alpha; - - for(ky = 0; ky < or; ky++) { - for(kx = 0; kx < oc; kx++) - po_[kx] += z * pi_[kx]; - pi_ += ic; - po_ += oc; - } - } - } - - } else { - /* SSE-based convolution */ - for(yy = 0; yy < kr; yy++) { - for(xx = 0; xx < kc; xx++) { - real *po_ = r_; - real *pi_ = t_ + yy*sr*ic + xx*sc; - real z = *k_++ * alpha; - - for(ky = 0; ky < or; ky++) { - THVector_(cadd)(po_, po_, pi_, z, oc); - pi_ += ic; - po_ += oc; - } - } - } - } -} -/* - 3D Input, 3D kernel : convolve given volume with the given kernel. -*/ -void THTensor_(validXCorr3Dptr)(real *r_, - real alpha, - real *t_, long it, long ir, long ic, - real *k_, long kt, long kr, long kc, - long st, long sr, long sc) -{ - long ot = (it - kt) / st + 1; - long or = (ir - kr) / sr + 1; - long oc = (ic - kc) / sc + 1; - - long zz, xx, yy; - - for (zz = 0; zz < ot; zz++) - { - for(yy = 0; yy < or; yy++) - { - for(xx = 0; xx < oc; xx++) - { - /* Dot product in two dimensions... (between input image and the mask) */ - real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc; - real *pw_ = k_; - real sum = 0; - long kz, kx, ky; - for(kz = 0; kz < kt; kz++) - { - for(ky = 0; ky < kr; ky++) - { - for(kx = 0; kx < kc; kx++) { - sum += pi_[kx]*pw_[kx]; - } - pi_ += ic; /* next input line */ - pw_ += kc; /* next mask line */ - } - pi_ += (ir-kr)*ic; /* next input slice */ - } - /* Update output */ - *r_++ += sum*alpha; - } - } - } -} - -/* - 3D Input, 3D kernel : convolve given volume with the given kernel. -*/ -void THTensor_(validConv3Dptr)(real *r_, - real alpha, - real *t_, long it, long ir, long ic, - real *k_, long kt, long kr, long kc, - long st, long sr, long sc) -{ - long ot = (it - kt) / st + 1; - long or = (ir - kr) / sr + 1; - long oc = (ic - kc) / sc + 1; - - long zz, xx, yy; - - for(zz = 0; zz < ot; zz++) - { - for(yy = 0; yy < or; yy++) - { - for(xx = 0; xx < oc; xx++) - { - /* Dot product in two dimensions... (between input image and the mask) */ - real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc; - real *pw_ = k_ + kt*kr*kc - 1; - real sum = 0; - long kz, kx, ky; - for(kz = 0; kz < kt; kz++) - { - for(ky = 0; ky < kr; ky++) - { - for(kx = 0; kx < kc; kx++) { - sum += pi_[kx]*pw_[-kx]; - } - pi_ += ic; /* next input line */ - pw_ -= kc; /* next mask line */ - } - pi_ += (ir-kr)*ic; /* next input slice */ - } - /* Update output */ - *r_++ += alpha*sum; - } - } - } -} - - -/* - 3D Input, 3D kernel : convolve given volume with the given kernel, full convolution. -*/ -void THTensor_(fullConv3Dptr)(real *r_, - real alpha, - real *t_, long it, long ir, long ic, - real *k_, long kt, long kr, long kc, - long st, long sr, long sc) -{ - long or = (ir - 1) * sr + kr; - long oc = (ic - 1) * sc + kc; - - long zz, xx, yy; - - for(zz = 0; zz < it; zz++) - { - for(yy = 0; yy < ir; yy++) - { - for(xx = 0; xx < ic; xx++) - { - /* Outer product in two dimensions... 
(between input image and the mask) */ - real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc; - real *pw_ = k_; - long kz, kx, ky; - /* printf("Output Plane : %ld,%ld,%ld, input val=%g\n",zz,yy,xx,*t_); */ - for(kz = 0; kz < kt; kz++) - { - for(ky = 0; ky < kr; ky++) - { - real z = *t_ * alpha; - for(kx = 0; kx < kc; kx++) { - /* printf("o=%g,k=%g," , po_[kx],pw_[kx]); */ - po_[kx] += z * pw_[kx]; - /* printf("o=%g " , po_[kx]); */ - } - /* printf("\n"); */ - po_ += oc; /* next input line */ - pw_ += kc; /* next mask line */ - } - po_ += (or-kr)*oc; /* next output slice */ - /* printf("\n"); */ - } - t_++; - } - } - } -} - -/* - 3D Input, 3D kernel : convolve given volume with the given kernel, full convolution. -*/ -void THTensor_(fullXCorr3Dptr)(real *r_, - real alpha, - real *t_, long it, long ir, long ic, - real *k_, long kt, long kr, long kc, - long st, long sr, long sc) -{ - long or = (ir - 1) * sr + kr; - long oc = (ic - 1) * sc + kc; - - long zz, xx, yy; - - for(zz = 0; zz < it; zz++) - { - for(yy = 0; yy < ir; yy++) - { - for(xx = 0; xx < ic; xx++) - { - /* Outer product in two dimensions... (between input image and the mask) */ - real *po_ = r_ + zz*st*or*oc + yy*sr*oc + xx*sc; - real *pw_ = k_ + kt*kr*kc -1; - long kz, kx, ky; - for(kz = 0; kz < kt; kz++) - { - for(ky = 0; ky < kr; ky++) - { - real z = *t_ * alpha; - for(kx = 0; kx < kc; kx++) { - po_[kx] += z * pw_[-kx]; - } - po_ += oc; /* next input line */ - pw_ -= kc; /* next mask line */ - } - po_ += (or-kr)*oc; /* next output slice */ - } - t_++; - } - } - } -} - -/* - 3D Input, 3D kernel : convolve given image with the given kernel, valid convolution. - for sr,sc=1 this is equivalent to validXCorr3Dptr, but otherwise it is useful for - calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 -*/ -void THTensor_(validXCorr3DRevptr)(real *r_, - real alpha, - real *t_, long it, long ir, long ic, - real *k_, long kt, long kr, long kc, - long st, long sr, long sc) -{ - long ot = it - (kt - 1) * st; - long or = ir - (kr - 1) * sr; - long oc = ic - (kc - 1) * sc; - - long zz, xx, yy; - for(zz = 0; zz < kt; zz++) - { - for(yy = 0; yy < kr; yy++) - { - for(xx = 0; xx < kc; xx++) - { - real *po_ = r_; - real *pi_ = t_ + zz*st*ir*ic + yy*sr*ic + xx*sc; - real z = *k_++ * alpha; - long kz, kx, ky; - for(kz = 0; kz < ot; kz++) - { - for(ky = 0; ky < or; ky++) - { - for(kx = 0; kx < oc; kx++) - po_[kx] += z * pi_[kx]; - pi_ += ic; - po_ += oc; - } - pi_ += (ir-or)*ic; /* next input slice */ - } - } - } - } -} - -void THTensor_(conv2d)(real* output_data, - real alpha, - real* ptr_input, long nInputRows, long nInputCols, - real* ptr_weight, long nKernelRows, long nKernelCols, - long srow, long scol, - const char *vf, const char *xc) -{ - THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); - THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); - if (*vf == 'F') - if (*xc == 'X') - THTensor_(fullXCorr2Dptr)(output_data, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - THTensor_(fullConv2Dptr)(output_data, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - if (*xc == 'X') - THTensor_(validXCorr2Dptr)(output_data, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - THTensor_(validConv2Dptr)(output_data, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - 
srow, scol); -} - -void THTensor_(conv3d)(real* output_data, - real alpha, - real* ptr_input, long nInputDepth, long nInputRows, long nInputCols, - real* ptr_weight, long nKernelDepth, long nKernelRows, long nKernelCols, - long sdepth, long srow, long scol, - const char *vf, const char *xc) -{ - THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can be 'V' or 'F'"); - THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can be 'X' or 'C'"); - if (*vf == 'F') - if (*xc == 'X') - THTensor_(fullXCorr3Dptr)(output_data, - alpha, - ptr_input, nInputDepth, nInputRows, nInputCols, - ptr_weight, nKernelDepth, nKernelRows, nKernelCols, - sdepth, srow, scol); - else - THTensor_(fullConv3Dptr)(output_data, - alpha, - ptr_input, nInputDepth, nInputRows, nInputCols, - ptr_weight, nKernelDepth, nKernelRows, nKernelCols, - sdepth, srow, scol); - else - if (*xc == 'X') - THTensor_(validXCorr3Dptr)(output_data, - alpha, - ptr_input, nInputDepth, nInputRows, nInputCols, - ptr_weight, nKernelDepth, nKernelRows, nKernelCols, - sdepth, srow, scol); - else - THTensor_(validConv3Dptr)(output_data, - alpha, - ptr_input, nInputDepth, nInputRows, nInputCols, - ptr_weight, nKernelDepth, nKernelRows, nKernelCols, - sdepth, srow, scol); -} - -long THTensor_(convsize)(long x, long k, long s, const char* vf) -{ - THArgCheck(*vf == 'V' || *vf == 'F', 1, "type of convolution can be 'V' or 'F'"); - if (*vf == 'V') - return (x-k)/s + 1; - else - return (x-1)*s + k; -} - - -/* - 3D input, 3D kernel, 4D output - like rank1 update - A <- xx' + beta*A - for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for - calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 -*/ -void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol) -{ - long nInputPlane, nInputRows, nInputCols; - long nKernelPlane, nKernelRows, nKernelCols; - long nOutputPlane, nOutputRows, nOutputCols; - long istride0, kstride0; - THTensor *input; - THTensor *kernel; - real *input_data; - real *weight_data; - real *output_data; - ptrdiff_t nelem; - long k; - - THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); - THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); - THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); - - input = THTensor_(newContiguous)(t_); - kernel = THTensor_(newContiguous)(k_); - - nInputPlane = input->size[0]; - istride0 = input->stride[0]; - nInputRows = input->size[1]; - nInputCols = input->size[2]; - - kstride0 = kernel->stride[0]; - nKernelPlane = kernel->size[0]; - nKernelRows = kernel->size[1]; - nKernelCols = kernel->size[2]; - nOutputPlane = nInputPlane * kernel->size[0]; - - THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "covn2DRevger : Input image is smaller than kernel"); - - nOutputRows = nInputRows - (nKernelRows - 1) * srow; - nOutputCols = nInputCols - (nKernelCols - 1) * scol; - - nelem = THTensor_(nElement)(r_); - THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - /*THTensor_(zero)(r_);*/ - -#pragma omp parallel for private(k) - for (k = 0; k < r_->size[0]*r_->size[1]; k++) - { - real* ptr_output = output_data + k*nOutputCols*nOutputRows; - long l; - for (l = 0; l < 
nOutputRows*nOutputCols; l++) - ptr_output[l] = 0.0; - } - } - else if (beta != 1) - { - /*THTensor_(mul)(r_, beta);*/ -#pragma omp parallel for private(k) - for (k = 0; k < r_->size[0]*r_->size[1]; k++) - { - real* ptr_output = output_data + k*nOutputCols*nOutputRows; - long l; - for (l = 0; l < nOutputRows*nOutputCols; l++) - ptr_output[l] *= beta; - } - } - -#pragma omp parallel for private(k) - for(k = 0; k < nKernelPlane; k++) - { - long i; - /* get kernel */ - real *ptr_weight = weight_data+k*kstride0; - - for(i = 0; i < nInputPlane; i++) - { - /* get output */ - real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows; - /* get input */ - real *ptr_input = input_data+i*istride0; - - /* do image, kernel convolution */ - THTensor_(validXCorr2DRevptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - /* Next output plane */ - /* output_data += nOutputCols*nOutputRows; */ - } - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} - - -/* - 3D input, 3D kernel, 4D output - like rank1 update - A <- xx' + beta*A - for sr,sc=1 this is equivalent to conv2Dger, but otherwise it is useful for - calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 -*/ -void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol) -{ - long nbatch, nInputPlane, nInputRows, nInputCols; - long nKernelPlane, nKernelRows, nKernelCols; - long nOutputRows, nOutputCols; - long istride0, kstride0, istride1, kstride1; - THTensor *input; - THTensor *kernel; - real *input_data; - real *weight_data; - real *output_data; - ptrdiff_t nelem; - long k; - - THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); - THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); - THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); - - input = THTensor_(newContiguous)(t_); - kernel = THTensor_(newContiguous)(k_); - - istride0 = input->stride[0]; - istride1 = input->stride[1]; - nbatch = input->size[0]; - nInputPlane = input->size[1]; - nInputRows = input->size[2]; - nInputCols = input->size[3]; - - kstride0 = kernel->stride[0]; - kstride1 = kernel->stride[1]; - nKernelPlane = kernel->size[1]; - nKernelRows = kernel->size[2]; - nKernelCols = kernel->size[3]; - - THArgCheck(nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv2DRevger : Input image is smaller than kernel"); - THArgCheck(kernel->size[0] == input->size[0] , 2, "conv2DRevger : Input batch and kernel batch is not same size"); - - nOutputRows = nInputRows - (nKernelRows - 1) * srow; - nOutputCols = nInputCols - (nKernelCols - 1) * scol; - - nelem = THTensor_(nElement)(r_); - THTensor_(resize4d)(r_,nKernelPlane, nInputPlane, nOutputRows, nOutputCols); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - /*THTensor_(zero)(r_);*/ - -#pragma omp parallel for private(k) - for (k = 0; k < r_->size[0]*r_->size[1]; k++) - { - real* ptr_output = output_data + k*nOutputCols*nOutputRows; - long l; - for (l = 0; l < nOutputRows*nOutputCols; l++) - ptr_output[l] = 0.0; - } - } - else if (beta != 1) - { - /*THTensor_(mul)(r_, beta);*/ -#pragma omp parallel for private(k) - for (k = 0; k < r_->size[0]*r_->size[1]; k++) - { - real* ptr_output = output_data + 
k*nOutputCols*nOutputRows; - long l; - for (l = 0; l < nOutputRows*nOutputCols; l++) - ptr_output[l] *= beta; - } - } - -#pragma omp parallel for private(k) - for(k = 0; k < nKernelPlane; k++) - { - long i; - for(i = 0; i < nInputPlane; i++) - { - long p; - for(p = 0; p < nbatch; p++) - { - /* get kernel */ - real *ptr_weight = weight_data + p*kstride0 + k*kstride1; - /* get output */ - real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows; - /* get input */ - real *ptr_input = input_data + p*istride0 + i*istride1; - - /* do image, kernel convolution */ - THTensor_(validXCorr2DRevptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - /* Next output plane */ - /* output_data += nOutputCols*nOutputRows; */ - } - } - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} - - -/* - 3D input, 3D kernel, 4D output - like rank1 update - A <- xx' + beta*A -*/ -void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc) -{ - long nInputPlane, nInputRows, nInputCols; - long nKernelPlane, nKernelRows, nKernelCols; - long nOutputPlane, nOutputRows, nOutputCols; - long istride0, kstride0; - - THTensor *input; - THTensor *kernel; - real *input_data; - real *weight_data; - real *output_data; - ptrdiff_t nelem; - long k; - - THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); - THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); - THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); - THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); - THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); - - input = THTensor_(newContiguous)(t_); - kernel = THTensor_(newContiguous)(k_); - - nInputPlane = input->size[0]; - istride0 = input->stride[0]; - nInputRows = input->size[1]; - nInputCols = input->size[2]; - - kstride0 = kernel->stride[0]; - nKernelPlane = kernel->size[0]; - nKernelRows = kernel->size[1]; - nKernelCols = kernel->size[2]; - nOutputPlane = nInputPlane * kernel->size[0]; - - THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dger : Input image is smaller than kernel"); - - if (*vf == 'F') { - nOutputRows = (nInputRows - 1) * srow + nKernelRows; - nOutputCols = (nInputCols - 1) * scol + nKernelCols; - } else { /* valid */ - nOutputRows = (nInputRows - nKernelRows) / srow + 1; - nOutputCols = (nInputCols - nKernelCols) / scol + 1; - } - - nelem = THTensor_(nElement)(r_); - THTensor_(resize4d)(r_, nKernelPlane, nInputPlane, nOutputRows, nOutputCols); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - /*THTensor_(zero)(r_);*/ -#pragma omp parallel for private(k) - for (k = 0; k < r_->size[0]*r_->size[1]; k++) - { - real* ptr_output = output_data + k*nOutputCols*nOutputRows; - long l; - for (l = 0; l < nOutputRows*nOutputCols; l++) - ptr_output[l] = 0.0; - } - } - else if (beta != 1) - { - /*THTensor_(mul)(r_, beta);*/ -#pragma omp parallel for private(k) - for (k = 0; k < r_->size[0]*r_->size[1]; k++) - { - real* ptr_output = output_data + k*nOutputCols*nOutputRows; - long l; - for (l = 0; l < nOutputRows*nOutputCols; l++) - ptr_output[l] *= beta; - } - } - -#pragma omp 
parallel for private(k) - for(k = 0; k < nKernelPlane; k++) - { - long i; - /* get kernel */ - real *ptr_weight = weight_data+k*kstride0; - - for(i = 0; i < nInputPlane; i++) - { - /* get output */ - real *ptr_output = output_data + k*nInputPlane*nOutputCols*nOutputRows + i*nOutputCols*nOutputRows; - /* get input */ - real *ptr_input = input_data+i*istride0; - - /* do image, kernel convolution */ - if (*vf == 'F') - if (*xc == 'X') - THTensor_(fullXCorr2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - THTensor_(fullConv2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - if (*xc == 'X') - THTensor_(validXCorr2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - THTensor_(validConv2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - /* Next output plane */ - /* output_data += nOutputCols*nOutputRows; */ - } - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} - - -/* - 3D input, 4D kernel, 3D output - matrix vector product like - y <- Ax + beta*y -*/ -void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc) -{ - long nInputPlane, nInputRows, nInputCols; - long nKernelRows, nKernelCols; - long nOutputPlane, nOutputRows, nOutputCols; - long istride0, kstride0, kstride1; - THTensor *input; - THTensor* kernel; - real *input_data; - real *weight_data; - real *output_data; - ptrdiff_t nelem; - long k; - - THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); - THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); - THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); - THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); - THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); - - input = THTensor_(newContiguous)(t_); - if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) { - kernel = THTensor_(newContiguous)(k_); - } else { - THTensor_(retain)(k_); - kernel = k_; - } - - nInputPlane = input->size[0]; - istride0 = input->stride[0]; - nInputRows = input->size[1]; - nInputCols = input->size[2]; - - kstride0 = kernel->stride[0]; - kstride1 = kernel->stride[1]; - nKernelRows = kernel->size[2]; - nKernelCols = kernel->size[3]; - nOutputPlane = kernel->size[0]; - THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); - - THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmv : Input image is smaller than kernel"); - - if (*vf == 'F') { - nOutputRows = (nInputRows - 1) * srow + nKernelRows; - nOutputCols = (nInputCols - 1) * scol + nKernelCols; - } else { /* valid */ - nOutputRows = (nInputRows - nKernelRows) / srow + 1; - nOutputCols = (nInputCols - nKernelCols) / scol + 1; - } - - nelem = THTensor_(nElement)(r_); - THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - /*THTensor_(zero)(r_);*/ -#pragma omp parallel for private(k) - for (k = 0; k < r_->size[0]; k++) - { - real* ptr_output = 
output_data + k*nOutputCols*nOutputRows; - long l; - for (l = 0; l < nOutputRows*nOutputCols; l++) - ptr_output[l] = 0.0; - } - } - else if (beta != 1) - { - /*THTensor_(mul)(r_, beta);*/ -#pragma omp parallel for private(k) - for (k = 0; k < r_->size[0]; k++) - { - real* ptr_output = output_data + k*nOutputCols*nOutputRows; - long l; - for (l = 0; l < nOutputRows*nOutputCols; l++) - ptr_output[l] *= beta; - } - } - -#pragma omp parallel for private(k) - for(k = 0; k < nOutputPlane; k++) - { - long i; - /* get output */ - real *ptr_output = output_data + k*nOutputCols*nOutputRows; - for(i = 0; i < nInputPlane; i++) - { - /* get kernel */ - real *ptr_weight = weight_data + k*kstride0 + i*kstride1; - /* get input */ - real *ptr_input = input_data + i*istride0; - - /* do image, kernel convolution */ - if (*vf == 'F') - if (*xc == 'X') - THTensor_(fullXCorr2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - THTensor_(fullConv2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - if (*xc == 'X') - THTensor_(validXCorr2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - THTensor_(validConv2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - } - /* Next output plane */ - /* output_data += nOutputCols*nOutputRows;*/ - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} - - -/* - 3D input, 4D kernel, 3D output - matrix vector product like - y <- Ax + beta*y -*/ -void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc) -{ - long nInputPlane, nInputRows, nInputCols; - long nKernelRows, nKernelCols; - long nOutputPlane, nOutputRows, nOutputCols; - long kstride0, kstride1; - THTensor *input; - THTensor* kernel; - long nbatch; - ptrdiff_t nelem; - real *input_data; - real *weight_data; - real *output_data; - long p; - - THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); - THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); - THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); - THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); - THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); - - input = THTensor_(newContiguous)(t_); - if (!(k_->stride[3] == 1) || !(k_->stride[2] == k_->size[3])) { - kernel = THTensor_(newContiguous)(k_); - } else { - THTensor_(retain)(k_); - kernel = k_; - } - - nbatch = input->size[0]; - nInputPlane = input->size[1]; - nInputRows = input->size[2]; - nInputCols = input->size[3]; - - kstride0 = kernel->stride[0]; - kstride1 = kernel->stride[1]; - nKernelRows = kernel->size[2]; - nKernelCols = kernel->size[3]; - nOutputPlane = kernel->size[0]; - THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); - - THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmv : Input image is smaller than kernel"); - - if (*vf == 'F') { - nOutputRows = (nInputRows - 1) * srow + nKernelRows; - nOutputCols = (nInputCols - 1) * scol + nKernelCols; - } else { /* valid */ - nOutputRows = (nInputRows - nKernelRows) / srow + 1; - nOutputCols = (nInputCols - nKernelCols) / scol + 1; - } - - nelem = 
THTensor_(nElement)(r_); - THTensor_(resize4d)(r_, nbatch, nOutputPlane, nOutputRows, nOutputCols); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - /*THTensor_(zero)(r_);*/ -#pragma omp parallel for private(p) - for (p=0; p < r_->size[0]; p++) - { - long k; - for (k = 0; k < r_->size[1]; k++) - { - real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows; - long l; - for (l = 0; l < nOutputRows*nOutputCols; l++) - ptr_output[l] = 0.0; - } - } - } - else if (beta != 1) - { - /*THTensor_(mul)(r_, beta);*/ -#pragma omp parallel for private(p) - for(p=0; p < r_->size[0]; p++) - { - long k; - for (k = 0; k < r_->size[1]; k++) - { - real* ptr_output = output_data + p*nOutputPlane*nOutputRows*nOutputCols + k*nOutputCols*nOutputRows; - long l; - for (l = 0; l < nOutputRows*nOutputCols; l++) - ptr_output[l] *= beta; - } - } - } - -#pragma omp parallel for private(p) - for(p=0; p < nbatch; p++) - { - long k; - for(k = 0; k < nOutputPlane; k++) - { - long i; - /* get output */ - real *ptr_output = output_data + p*nOutputPlane*nOutputCols*nOutputRows + k*nOutputCols*nOutputRows; - for(i = 0; i < nInputPlane; i++) - { - /* get kernel */ - real *ptr_weight = weight_data + k*kstride0 + i*kstride1; - /* get input */ - real *ptr_input = input_data + p*nInputPlane*nInputRows*nInputCols + i*nInputRows*nInputCols; - - /* do image, kernel convolution */ - if (*vf == 'F') - if (*xc == 'X') - THTensor_(fullXCorr2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - THTensor_(fullConv2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - if (*xc == 'X') - THTensor_(validXCorr2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - else - THTensor_(validConv2Dptr)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol); - } - /* Next output plane */ - /* output_data += nOutputCols*nOutputRows;*/ - } - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} - - -/* - 2D input, 2D kernel, 2D output - scalar multiplication like - y <- x*y + beta*y -*/ -void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc) -{ - THTensor *input; - THTensor* kernel; - long nInputRows; - long nInputCols; - long nKernelRows; - long nKernelCols; - long nOutputRows, nOutputCols; - real *ptr_input; - real *ptr_weight; - real *output_data; - ptrdiff_t nelem; - - THArgCheck(t_->nDimension == 2 , 3, "input: 2D Tensor expected"); - THArgCheck(k_->nDimension == 2 , 4, "kernel: 2D Tensor expected"); - THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); - - input = THTensor_(newContiguous)(t_); - kernel = THTensor_(newContiguous)(k_); - - nInputRows = input->size[0]; - nInputCols = input->size[1]; - nKernelRows = kernel->size[0]; - nKernelCols = kernel->size[1]; - - THArgCheck((nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dmul : Input image is smaller than kernel"); - - nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); - nOutputCols = THTensor_(convsize)(nInputCols, 
nKernelCols, scol, vf); - - nelem = THTensor_(nElement)(r_); - THTensor_(resize2d)(r_, nOutputRows, nOutputCols); - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - THTensor_(zero)(r_); - else if (beta != 1) - THTensor_(mul)(r_, r_, beta); - - ptr_input = THTensor_(data)(input); - ptr_weight = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - - /* do image, kernel convolution */ - THTensor_(conv2d)(output_data, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol, vf, xc); - THTensor_(free)(input); - THTensor_(free)(kernel); -} - -/* - 3D input, 3D kernel, 3D output - component wise multiplication like - y <- y.*x + beta*y -*/ -void THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc) -{ - long nInputPlane, nInputRows, nInputCols; - long nKernelRows, nKernelCols; - long nOutputPlane, nOutputRows, nOutputCols; - long istride0, kstride0; - THTensor *input; - THTensor *kernel; - real *input_data; - real *weight_data; - real *output_data; - ptrdiff_t nelem; - long k; - - THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); - THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); - THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); - - input = THTensor_(newContiguous)(t_); - kernel = THTensor_(newContiguous)(k_); - - istride0 = input->stride[0]; - nInputPlane = input->size[0]; - nInputRows = input->size[1]; - nInputCols = input->size[2]; - - kstride0 = kernel->stride[0]; - nOutputPlane = kernel->size[0]; - nKernelRows = kernel->size[1]; - nKernelCols = kernel->size[2]; - - THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); - THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv2Dcmul : Input image is smaller than kernel"); - - nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); - nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); - - nelem = THTensor_(nElement)(r_); - THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - THTensor_(zero)(r_); - } - else if (beta != 1) - THTensor_(mul)(r_, r_, beta); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - for(k = 0; k < nOutputPlane; k++) - { - /* get kernel */ - real *ptr_weight = weight_data + k*kstride0; - /* get input */ - real *ptr_input = input_data + k*istride0; - - /* do image, kernel convolution */ - THTensor_(conv2d)(output_data, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol, vf, xc); - /* Next output plane */ - output_data += nOutputCols*nOutputRows; - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} - -/* - 3D input, 3D kernel, 3D output - component wise multiplication like with a permutation map - y <- y.*x + beta*y -*/ -void THTensor_(conv2Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map, long srow, long scol, const char *vf, const char *xc) -{ - long nInputPlane, nInputRows, nInputCols; - long nKernelRows, nKernelCols; - long nOutputPlane, nOutputRows, nOutputCols; - long istride0, kstride0; - THTensor *input; - THTensor* kernel; - real *input_data; - real *weight_data; - real *output_data; - long nmaps; - 
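- /* map is an nmaps x 2 tensor of 1-based (from, to) plane pairs: for row k,
-    kernel plane k is convolved with input plane "from" and the result is
-    accumulated into output plane "to", so a row of {2, 5} reads input
-    plane 2 and writes output plane 5 */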
ptrdiff_t nelem; - long k; - - THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); - THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); - THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected"); - THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); - - input = THTensor_(newContiguous)(t_); - kernel = THTensor_(newContiguous)(k_); - - istride0 = input->stride[0]; - nInputPlane = input->size[0]; - nInputRows = input->size[1]; - nInputCols = input->size[2]; - - kstride0 = kernel->stride[0]; - nOutputPlane = kernel->size[0]; - nKernelRows = kernel->size[1]; - nKernelCols = kernel->size[2]; - - THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); - THArgCheck( (nInputRows >= nKernelRows && nInputCols >= nKernelCols) - || *vf == 'F', 2, "conv2Dmap : Input image is smaller than kernel"); - - nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); - nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); - - nelem = THTensor_(nElement)(r_); - THTensor_(resize3d)(r_, nOutputPlane, nOutputRows, nOutputCols); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - THTensor_(zero)(r_); - } - else if (beta != 1) - THTensor_(mul)(r_, r_, beta); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - nmaps = map->size[0]; - - for(k = 0; k < nmaps; k++) - { - /* get indices */ - long from = (long)THTensor_(get2d)(map,k,0)-1; - long to = (long)THTensor_(get2d)(map,k,1)-1; - - /* get kernel */ - real *ptr_weight = weight_data + k*kstride0; - /* get input */ - real *ptr_input = input_data + from*istride0; - /* get output */ - real *ptr_output = output_data + to*nOutputRows*nOutputCols; - - /* do image, kernel convolution */ - THTensor_(conv2d)(ptr_output, - alpha, - ptr_input, nInputRows, nInputCols, - ptr_weight, nKernelRows, nKernelCols, - srow, scol, vf, xc); - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} - -/* - 4D input, 4D kernel, 5D output - like rank1 update - A <- xx' + beta*A - for sr,sc=1 this is equivalent to xcorr2Dger, but otherwise it is useful for - calculating derivatives wrt a kernel that is applied with stride sr,sc != 1 -*/ -void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, - long sdepth, long srow, long scol) -{ - long nInputPlane, nInputDepth, nInputRows, nInputCols; - long nKernelPlane, nKernelDepth, nKernelRows, nKernelCols; - long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; - long istride0, kstride0; - THTensor *input; - THTensor *kernel; - real *input_data; - real *weight_data; - real *output_data; - ptrdiff_t nelem; - long k, i; - - THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); - THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); - THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); - THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); - - input = THTensor_(newContiguous)(t_); - kernel = THTensor_(newContiguous)(k_); - - nInputPlane = input->size[0]; - istride0 = input->stride[0]; - nInputDepth = input->size[1]; - nInputRows = input->size[2]; - nInputCols = input->size[3]; - - kstride0 = kernel->stride[0]; - nKernelPlane = kernel->size[0]; - nKernelDepth= kernel->size[1]; - nKernelRows = kernel->size[2]; - nKernelCols = 
kernel->size[3]; - nOutputPlane = nInputPlane * kernel->size[0]; - - THArgCheck(nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols , 2, "conv3DRevger : Input image is smaller than kernel"); - - nOutputDepth = nInputDepth - (nKernelDepth - 1) * sdepth; - nOutputRows = nInputRows - (nKernelRows - 1) * srow; - nOutputCols = nInputCols - (nKernelCols - 1) * scol; - - nelem = THTensor_(nElement)(r_); - THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - THTensor_(zero)(r_); - } - else if (beta != 1) - THTensor_(mul)(r_, r_, beta); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - for(k = 0; k < nKernelPlane; k++) - { - /* get kernel */ - real *ptr_weight = weight_data+k*kstride0; - - for(i = 0; i < nInputPlane; i++) - { - /* get input */ - real *ptr_input = input_data+i*istride0; - - /* do image, kernel convolution */ - THTensor_(validXCorr3DRevptr)(output_data, - alpha, - ptr_input, nInputDepth, nInputRows, nInputCols, - ptr_weight, nKernelDepth, nKernelRows, nKernelCols, - sdepth, srow, scol); - /* Next output plane */ - output_data += nOutputDepth*nOutputCols*nOutputRows; - } - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} - - -/* - 4D input, 4D kernel, 5D output - like rank1 update - A <- xx' + beta*A -*/ -void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, - long sdepth, long srow, long scol, const char *vf, const char *xc) -{ - long nInputPlane, nInputDepth, nInputRows, nInputCols; - long nKernelPlane, nKernelDepth, nKernelRows, nKernelCols; - long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; - long istride0, kstride0; - THTensor *input; - THTensor *kernel; - real *input_data; - real *weight_data; - real *output_data; - ptrdiff_t nelem; - long k, i; - - THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); - THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); - THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); - THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); - THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); - THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); - - input = THTensor_(newContiguous)(t_); - kernel = THTensor_(newContiguous)(k_); - - nInputPlane = input->size[0]; - istride0 = input->stride[0]; - nInputDepth = input->size[1]; - nInputRows = input->size[2]; - nInputCols = input->size[3]; - - kstride0 = kernel->stride[0]; - nKernelPlane = kernel->size[0]; - nKernelDepth = kernel->size[1]; - nKernelRows = kernel->size[2]; - nKernelCols = kernel->size[3]; - nOutputPlane = nInputPlane * kernel->size[0]; - - THArgCheck((nInputDepth >= nKernelDepth - && nInputRows >= nKernelRows - && nInputCols >= nKernelCols) - || *vf == 'F', 2, "conv3Dger : Input image is smaller than kernel"); - - nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); - nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); - nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); - - nelem = THTensor_(nElement)(r_); - THTensor_(resize5d)(r_,nKernelPlane, nInputPlane, nOutputDepth, nOutputRows, nOutputCols); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - THTensor_(zero)(r_); - } 
- else if (beta != 1) - THTensor_(mul)(r_, r_, beta); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - for(k = 0; k < nKernelPlane; k++) - { - /* get kernel */ - real *ptr_weight = weight_data+k*kstride0; - - for(i = 0; i < nInputPlane; i++) - { - /* get input */ - real *ptr_input = input_data+i*istride0; - - /* do image, kernel convolution */ - THTensor_(conv3d)(output_data, - alpha, - ptr_input, nInputDepth, nInputRows, nInputCols, - ptr_weight, nKernelDepth, nKernelRows, nKernelCols, - sdepth, srow, scol, vf, xc); - - /* Next output plane */ - output_data += nOutputDepth*nOutputCols*nOutputRows; - } - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} - -/* - 4D input, 5D kernel, 4D output - matrix vector product like - y <- Ax + beta*y -*/ -void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, - long sdepth, long srow, long scol, const char *vf, const char *xc) -{ - long nInputPlane, nInputDepth, nInputRows, nInputCols; - long nKernelDepth, nKernelRows, nKernelCols; - long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; - long istride0, kstride0, kstride1; - THTensor *input; - THTensor *kernel; - real *input_data; - real *weight_data; - real *output_data; - ptrdiff_t nelem; - long k, i; - - THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); - THArgCheck(k_->nDimension == 5 , 4, "kernel: 5D Tensor expected"); - THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); - THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); - THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); - THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); - - input = THTensor_(newContiguous)(t_); - if (!(k_->stride[4] == 1) || !(k_->stride[3] == k_->size[4])) { - kernel = THTensor_(newContiguous)(k_); - } else { - THTensor_(retain)(k_); - kernel = k_; - } - - nInputPlane = input->size[0]; - istride0 = input->stride[0]; - nInputDepth = input->size[1]; - nInputRows = input->size[2]; - nInputCols = input->size[3]; - - kstride0 = kernel->stride[0]; - kstride1 = kernel->stride[1]; - nKernelDepth = kernel->size[2]; - nKernelRows = kernel->size[3]; - nKernelCols = kernel->size[4]; - nOutputPlane = kernel->size[0]; - THArgCheck(kernel->size[1] == nInputPlane, 2, "invalid number of input planes"); - - THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmv : Input image is smaller than kernel"); - - nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); - nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); - nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); - - nelem = THTensor_(nElement)(r_); - THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - THTensor_(zero)(r_); - } - else if (beta != 1) - THTensor_(mul)(r_, r_, beta); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - for(k = 0; k < nOutputPlane; k++) - { - for(i = 0; i < nInputPlane; i++) - { - /* get kernel */ - real *ptr_weight = weight_data + k*kstride0 + i*kstride1; - /* get input */ - real *ptr_input = input_data + i*istride0; - - /* do image, kernel convolution */ - 
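- /* output_data points at output plane k here; each input plane i adds
-    conv3d(input plane i, kernel[k][i]), so the sum over i behaves like a
-    matrix-vector product across planes */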
THTensor_(conv3d)(output_data, - alpha, - ptr_input, nInputDepth, nInputRows, nInputCols, - ptr_weight, nKernelDepth, nKernelRows, nKernelCols, - sdepth, srow, scol, vf, xc); - } - /* Next output plane */ - output_data += nOutputDepth*nOutputCols*nOutputRows; - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} - -/* - 3D input, 3D kernel, 3D output - scalar multiplication like - y <- x*y + beta*y -*/ -void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, - long sdepth, long srow, long scol, const char *vf, const char *xc) -{ - THTensor *input; - THTensor* kernel; - long nInputDepth; - long nInputRows; - long nInputCols; - long nKernelDepth; - long nKernelRows; - long nKernelCols; - long nOutputDepth, nOutputRows, nOutputCols; - real *ptr_input; - real *ptr_weight; - real *output_data; - ptrdiff_t nelem; - - THArgCheck(t_->nDimension == 3 , 3, "input: 3D Tensor expected"); - THArgCheck(k_->nDimension == 3 , 4, "kernel: 3D Tensor expected"); - THArgCheck(sdepth >= 1, 5, "Stride should be a positive integer"); - THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); - THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); - THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); - - input = THTensor_(newContiguous)(t_); - kernel = THTensor_(newContiguous)(k_); - - nInputDepth = input->size[0]; - nInputRows = input->size[1]; - nInputCols = input->size[2]; - nKernelDepth = kernel->size[0]; - nKernelRows = kernel->size[1]; - nKernelCols = kernel->size[2]; - - THArgCheck((nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dmul : Input image is smaller than kernel"); - - nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); - nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); - nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); - - nelem = THTensor_(nElement)(r_); - THTensor_(resize3d)(r_, nOutputDepth, nOutputRows, nOutputCols); - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - THTensor_(zero)(r_); - else if (beta != 1) - THTensor_(mul)(r_, r_, beta); - - ptr_input = THTensor_(data)(input); - ptr_weight = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - - /* do image, kernel convolution */ - THTensor_(conv3d)(output_data, - alpha, - ptr_input, nInputDepth, nInputRows, nInputCols, - ptr_weight, nKernelDepth, nKernelRows, nKernelCols, - sdepth, srow, scol, vf, xc); - THTensor_(free)(input); - THTensor_(free)(kernel); -} - -/* - 4D input, 4D kernel, 4D output - component wise multiplication like - y <- y.*x + beta*y -*/ -void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, - long sdepth, long srow, long scol, const char *vf, const char *xc) -{ - long nInputPlane, nInputDepth, nInputRows, nInputCols; - long nKernelDepth, nKernelRows, nKernelCols; - long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; - long istride0, kstride0; - - THTensor *input; - THTensor *kernel; - real *input_data; - real *weight_data; - real *output_data; - ptrdiff_t nelem; - long k; - - THArgCheck(t_->nDimension == 4 , 3, "input: 3D Tensor expected"); - THArgCheck(k_->nDimension == 4 , 4, "kernel: 3D Tensor expected"); - THArgCheck(srow >= 1, 5, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 6, "Stride should be a positive integer"); - 
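- /* vf selects the output size ('V'alid: (x-k)/s + 1, or 'F'ull: (x-1)*s + k)
-    and xc selects 'X'-correlation (kernel applied as-is) or 'C'onvolution
-    (kernel traversed in reverse) */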
THArgCheck(*vf == 'V' || *vf == 'F', 7, "type of convolution can 'V' or 'F'"); - THArgCheck(*xc == 'C' || *xc == 'X', 7, "type of convolution can 'X' or 'C'"); - - input = THTensor_(newContiguous)(t_); - kernel = THTensor_(newContiguous)(k_); - - istride0 = input->stride[0]; - nInputPlane = input->size[0]; - nInputDepth = input->size[1]; - nInputRows = input->size[2]; - nInputCols = input->size[3]; - - kstride0 = kernel->stride[0]; - nOutputPlane = kernel->size[0]; - nKernelDepth = kernel->size[1]; - nKernelRows = kernel->size[2]; - nKernelCols = kernel->size[3]; - - THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); - THArgCheck( (nInputDepth >= nKernelDepth && nInputRows >= nKernelRows && nInputCols >= nKernelCols) || *vf == 'F', 2, "conv3Dcmul : Input image is smaller than kernel"); - - nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); - nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); - nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); - - nelem = THTensor_(nElement)(r_); - THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - THTensor_(zero)(r_); - } - else if (beta != 1) - THTensor_(mul)(r_, r_, beta); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - for(k = 0; k < nOutputPlane; k++) - { - /* get kernel */ - real *ptr_weight = weight_data + k*kstride0; - /* get input */ - real *ptr_input = input_data + k*istride0; - - /* do image, kernel convolution */ - THTensor_(conv3d)(output_data, - alpha, - ptr_input, nInputDepth, nInputRows, nInputCols, - ptr_weight, nKernelDepth, nKernelRows, nKernelCols, - sdepth, srow, scol, vf, xc); - - /* Next output plane */ - output_data += nOutputDepth*nOutputCols*nOutputRows; - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} - -/* - 4D input, 4D kernel, 4D output - component wise multiplication like with a permutation map - y <- y.*x + beta*y -*/ -void THTensor_(conv3Dmap)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, THTensor *map, - long sdepth, long srow, long scol, const char *vf, const char *xc) -{ - long nInputPlane, nInputDepth, nInputRows, nInputCols; - long nKernelDepth, nKernelRows, nKernelCols; - long nOutputPlane, nOutputDepth, nOutputRows, nOutputCols; - long istride0, kstride0; - - THTensor *input; - THTensor *kernel; - ptrdiff_t nelem; - real *input_data; - real *weight_data; - real *output_data; - long nmaps; - long k; - - THArgCheck(t_->nDimension == 4 , 3, "input: 4D Tensor expected"); - THArgCheck(k_->nDimension == 4 , 4, "kernel: 4D Tensor expected"); - THArgCheck(map->nDimension == 2 , 4, "map: 2D Tensor expected"); - THArgCheck(srow >= 1, 6, "Stride should be a positive integer"); - THArgCheck(scol >= 1, 7, "Stride should be a positive integer"); - THArgCheck(*vf == 'V' || *vf == 'F', 8, "type of convolution can 'V' or 'F'"); - THArgCheck(*xc == 'C' || *xc == 'X', 8, "type of convolution can 'X' or 'C'"); - - input = THTensor_(newContiguous)(t_); - kernel = THTensor_(newContiguous)(k_); - - istride0 = input->stride[0]; - nInputPlane = input->size[0]; - nInputDepth = input->size[1]; - nInputRows = input->size[2]; - nInputCols = input->size[3]; - - kstride0 = kernel->stride[0]; - nOutputPlane = kernel->size[0]; - nKernelDepth = kernel->size[1]; - nKernelRows = kernel->size[2]; - nKernelCols = kernel->size[3]; - - 
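-  /* (Editorial note: `map` is an nmaps x 2 table of 1-based (from, to) plane
-     pairs; row k routes input plane `from` through kernel slice k into output
-     plane `to`, replacing the dense plane connectivity of conv3Dmv above.) */
- 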
THArgCheck(nOutputPlane == nInputPlane, 2, "invalid number of input/kernel planes"); - THArgCheck((nInputDepth >= nKernelDepth - && nInputRows >= nKernelRows - && nInputCols >= nKernelCols) || *vf == 'F', - 2, "conv3Dmap : Input image is smaller than kernel"); - - nOutputDepth = THTensor_(convsize)(nInputDepth, nKernelDepth, sdepth, vf); - nOutputRows = THTensor_(convsize)(nInputRows, nKernelRows, srow, vf); - nOutputCols = THTensor_(convsize)(nInputCols, nKernelCols, scol, vf); - - nelem = THTensor_(nElement)(r_); - THTensor_(resize4d)(r_, nOutputPlane, nOutputDepth, nOutputRows, nOutputCols); - - if (nelem == 0 || beta == 0 || nelem != THTensor_(nElement)(r_)) - { - THTensor_(zero)(r_); - } - else if (beta != 1) - THTensor_(mul)(r_, r_, beta); - - input_data = THTensor_(data)(input); - weight_data = THTensor_(data)(kernel); - output_data = THTensor_(data)(r_); - - nmaps = map->size[0]; - - for(k = 0; k < nmaps; k++) - { - /* get indices */ - long from = (long)THTensor_(get2d)(map,k,0)-1; - long to = (long)THTensor_(get2d)(map,k,1)-1; - - /* get kernel */ - real *ptr_weight = weight_data + k*kstride0; - /* get input */ - real *ptr_input = input_data + from*istride0; - /* get output */ - real *ptr_output = output_data + to*nOutputDepth*nOutputRows*nOutputCols; - - /* do image, kernel convolution */ - THTensor_(conv3d)(ptr_output, - alpha, - ptr_input, nInputDepth, nInputRows, nInputCols, - ptr_weight, nKernelDepth, nKernelRows, nKernelCols, - sdepth, srow, scol, vf, xc); - } - THTensor_(free)(input); - THTensor_(free)(kernel); -} -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensorConv.h b/contrib/lua-torch/torch7/lib/TH/generic/THTensorConv.h deleted file mode 100644 index 79866f3901..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THTensorConv.h +++ /dev/null @@ -1,79 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THTensorConv.h" -#else - -TH_API void THTensor_(validXCorr2Dptr)(real *r_, - real alpha, - real *t_, long ir, long ic, - real *k_, long kr, long kc, - long sr, long sc); - -TH_API void THTensor_(validConv2Dptr)(real *r_, - real alpha, - real *t_, long ir, long ic, - real *k_, long kr, long kc, - long sr, long sc); - -TH_API void THTensor_(fullXCorr2Dptr)(real *r_, - real alpha, - real *t_, long ir, long ic, - real *k_, long kr, long kc, - long sr, long sc); - -TH_API void THTensor_(fullConv2Dptr)(real *r_, - real alpha, - real *t_, long ir, long ic, - real *k_, long kr, long kc, - long sr, long sc); - -TH_API void THTensor_(validXCorr2DRevptr)(real *r_, - real alpha, - real *t_, long ir, long ic, - real *k_, long kr, long kc, - long sr, long sc); - -TH_API void THTensor_(conv2DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol); -TH_API void THTensor_(conv2DRevgerm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol); -TH_API void THTensor_(conv2Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc); -TH_API void THTensor_(conv2Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc); -TH_API void THTensor_(conv2Dmm)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc); -TH_API void THTensor_(conv2Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc); -TH_API void 
THTensor_(conv2Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long srow, long scol, const char *vf, const char *xc); - -TH_API void THTensor_(validXCorr3Dptr)(real *r_, - real alpha, - real *t_, long it, long ir, long ic, - real *k_, long kt, long kr, long kc, - long st, long sr, long sc); - -TH_API void THTensor_(validConv3Dptr)(real *r_, - real alpha, - real *t_, long it, long ir, long ic, - real *k_, long kt, long kr, long kc, - long st, long sr, long sc); - -TH_API void THTensor_(fullXCorr3Dptr)(real *r_, - real alpha, - real *t_, long it, long ir, long ic, - real *k_, long kt, long kr, long kc, - long st, long sr, long sc); - -TH_API void THTensor_(fullConv3Dptr)(real *r_, - real alpha, - real *t_, long it, long ir, long ic, - real *k_, long kt, long kr, long kc, - long st, long sr, long sc); - -TH_API void THTensor_(validXCorr3DRevptr)(real *r_, - real alpha, - real *t_, long it, long ir, long ic, - real *k_, long kt, long kr, long kc, - long st, long sr, long sc); - -TH_API void THTensor_(conv3DRevger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol); -TH_API void THTensor_(conv3Dger)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const char *xc); -TH_API void THTensor_(conv3Dmv)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const char *xc); -TH_API void THTensor_(conv3Dmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const char *xc); -TH_API void THTensor_(conv3Dcmul)(THTensor *r_, real beta, real alpha, THTensor *t_, THTensor *k_, long sdepth, long srow, long scol, const char *vf, const char *xc); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensorCopy.c b/contrib/lua-torch/torch7/lib/TH/generic/THTensorCopy.c deleted file mode 100644 index d9cd1c0d50..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THTensorCopy.c +++ /dev/null @@ -1,136 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THTensorCopy.c" -#else - -int THTensor_(copyTransposeValid)(THTensor *tensor, THTensor *src) { - const int MIN_SZ = 60 * 60; - return THTensor_(isContiguous)(tensor) && - THTensor_(nDimension)(src) == 2 && - THTensor_(stride)(src, 0) == 1 && - THTensor_(stride)(src, 1) == THTensor_(size)(src, 0) && - THTensor_(nElement)(tensor) >= MIN_SZ; -} - -// special case copy where tensor is contiguous and src is a transposed matrix -// This can be generalized to most copies, but it's tricker -void THTensor_(copyTranspose)(THTensor *tensor, THTensor *src) { - #define MIN(x, y) (((x) < (y)) ? (x) : (y)) - #define MAX(x, y) (((x) > (y)) ? (x) : (y)) - -#ifdef TH_REAL_IS_BYTE - const int BLOCK_SZ = 120; -#else - const int BLOCK_SZ = 60; -#endif - - THTensor *buf = THTensor_(newWithSize2d)(BLOCK_SZ, BLOCK_SZ); - real *sp = THTensor_(data)(src); - real *rp = THTensor_(data)(tensor); - real *bp = THTensor_(data)(buf); - - long NR = THTensor_(size)(src, 0); - long NC = THTensor_(size)(src, 1); - for (long R = 0; R < NR; R += BLOCK_SZ) { - for (long C = 0; C < NC; C += BLOCK_SZ) { - real *spo = sp + R + C * NR; - real *rpo = rp + C + R * NC; - - int nr = MIN(NR - R, BLOCK_SZ); - int nc = MIN(NC - C, BLOCK_SZ); - - // 1. copy columns from src to buf - for (int c = 0; c < nc; c++) { - memcpy(bp + c * BLOCK_SZ, spo + c * NR, nr * sizeof(real)); - } - - // 2. 
transpose buf in place - int rc_max = MAX(nr, nc); - int rc_min = MIN(nr, nc); - for (int r = 0; r < rc_max; r++) { - int end = MIN(r, rc_min); - for (int c = 0; c < end; c++) { - real tmp = bp[r + BLOCK_SZ * c]; - bp[r + BLOCK_SZ * c] = bp[r * BLOCK_SZ + c]; - bp[r * BLOCK_SZ + c] = tmp; - } - } - - // 3. copy rows from buf to dst - for (int r = 0; r < nr; r++) { - memcpy(rpo + r * NC, bp + r * BLOCK_SZ, nc * sizeof(real)); - } - } - } - THTensor_(free)(buf); - #undef MIN - #undef MAX -} - -void THTensor_(copy)(THTensor *tensor, THTensor *src) -{ - if (tensor == src) return; - if (THTensor_(isContiguous)(tensor) && THTensor_(isContiguous)(src) && THTensor_(nElement)(tensor) == THTensor_(nElement)(src)) { - real *sp = THTensor_(data)(src); - real *rp = THTensor_(data)(tensor); - ptrdiff_t sz = THTensor_(nElement)(tensor); -#ifndef TH_REAL_IS_HALF - THVector_(copy)(rp, sp, sz); -#else - memcpy(rp, sp, sz * sizeof(real)); -#endif -#ifndef TH_REAL_IS_HALF - } else if (THTensor_(copyTransposeValid)(tensor, src)) { - THTensor_(copyTranspose)(tensor, src); -#endif - } else { - TH_TENSOR_APPLY2(real, tensor, real, src, *tensor_data = *src_data;) - } -} - -#define IMPLEMENT_THTensor_COPY(TYPENAMESRC, TYPE_SRC) \ -void THTensor_(copy##TYPENAMESRC)(THTensor *tensor, TH##TYPENAMESRC##Tensor *src) \ -{ \ - TH_TENSOR_APPLY2(real, tensor, TYPE_SRC, src, *tensor_data = (real)(*src_data);) \ -} - -#define IMPLEMENT_THTensor_COPY_TO_HALF(TYPENAMESRC, TYPE_SRC) \ -void THTensor_(copy##TYPENAMESRC)(THTensor *tensor, TH##TYPENAMESRC##Tensor *src) \ -{ \ - TH_TENSOR_APPLY2(real, tensor, TYPE_SRC, src, *tensor_data = TH_float2half((float)*src_data);) \ -} - -#define IMPLEMENT_THTensor_COPY_FROM_HALF(TYPENAMESRC, TYPE_SRC) \ -void THTensor_(copy##TYPENAMESRC)(THTensor *tensor, TH##TYPENAMESRC##Tensor *src) \ -{ \ - TH_TENSOR_APPLY2(real, tensor, TYPE_SRC, src, *tensor_data = (real)TH_half2float(*src_data);) \ -} - -#define IMPLEMENT_THTensor_COPY_TO_FROM_HALF(TYPENAMESRC, TYPE_SRC) \ -void THTensor_(copy##TYPENAMESRC)(THTensor *tensor, TH##TYPENAMESRC##Tensor *src) \ -{ \ - TH_TENSOR_APPLY2(real, tensor, TYPE_SRC, src, *tensor_data = *src_data;) \ -} - -#ifndef TH_REAL_IS_HALF -IMPLEMENT_THTensor_COPY(Byte, unsigned char) -IMPLEMENT_THTensor_COPY(Char, char) -IMPLEMENT_THTensor_COPY(Short, short) -IMPLEMENT_THTensor_COPY(Int, int) -IMPLEMENT_THTensor_COPY(Long, long) -IMPLEMENT_THTensor_COPY(Float, float) -IMPLEMENT_THTensor_COPY(Double, double) -IMPLEMENT_THTensor_COPY_FROM_HALF(Half, THHalf) -#else -/* only allow pass-through for Half */ -IMPLEMENT_THTensor_COPY_TO_FROM_HALF(Half, THHalf) -IMPLEMENT_THTensor_COPY_TO_HALF(Byte, unsigned char) -IMPLEMENT_THTensor_COPY_TO_HALF(Char, char) -IMPLEMENT_THTensor_COPY_TO_HALF(Short, short) -IMPLEMENT_THTensor_COPY_TO_HALF(Int, int) -IMPLEMENT_THTensor_COPY_TO_HALF(Long, long) -IMPLEMENT_THTensor_COPY_TO_HALF(Float, float) -IMPLEMENT_THTensor_COPY_TO_HALF(Double, double) - -#endif /* REAL_IS_HALF */ - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensorCopy.h b/contrib/lua-torch/torch7/lib/TH/generic/THTensorCopy.h deleted file mode 100644 index b9e5bfc998..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THTensorCopy.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THTensorCopy.h" -#else - -/* Support for copy between different Tensor types */ - -TH_API void THTensor_(copy)(THTensor *tensor, THTensor *src); -TH_API void THTensor_(copyByte)(THTensor *tensor, struct THByteTensor *src); -TH_API void 
THTensor_(copyChar)(THTensor *tensor, struct THCharTensor *src); -TH_API void THTensor_(copyShort)(THTensor *tensor, struct THShortTensor *src); -TH_API void THTensor_(copyInt)(THTensor *tensor, struct THIntTensor *src); -TH_API void THTensor_(copyLong)(THTensor *tensor, struct THLongTensor *src); -TH_API void THTensor_(copyFloat)(THTensor *tensor, struct THFloatTensor *src); -TH_API void THTensor_(copyDouble)(THTensor *tensor, struct THDoubleTensor *src); -TH_API void THTensor_(copyHalf)(THTensor *tensor, struct THHalfTensor *src); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensorLapack.c b/contrib/lua-torch/torch7/lib/TH/generic/THTensorLapack.c deleted file mode 100644 index d4e52f6d7b..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THTensorLapack.c +++ /dev/null @@ -1,1121 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THTensorLapack.c" -#else - -/* -Check if self is transpose of a contiguous matrix -*/ -static int THTensor_(isTransposedContiguous)(THTensor *self) -{ - return self->stride[0] == 1 && self->stride[1] == self->size[0]; -} -/* -If a matrix is a regular contiguous matrix, make sure it is transposed -because this is what we return from Lapack calls. -*/ -static void THTensor_(checkTransposed)(THTensor *self) -{ - if(THTensor_(isContiguous)(self)) - THTensor_(transpose)(self, NULL, 0, 1); - return; -} -/* -newContiguous followed by transpose -Similar to (newContiguous), but checks if the transpose of the matrix -is contiguous and also limited to 2D matrices. -*/ -static THTensor *THTensor_(newTransposedContiguous)(THTensor *self) -{ - THTensor *tensor; - if(THTensor_(isTransposedContiguous)(self)) - { - THTensor_(retain)(self); - tensor = self; - } - else - { - tensor = THTensor_(newContiguous)(self); - THTensor_(transpose)(tensor, NULL, 0, 1); - } - - return tensor; -} - -/* -Given the result tensor and src tensor, decide if the lapack call should use the -provided result tensor or should allocate a new space to put the result in. - -The returned tensor have to be freed by the calling function. - -nrows is required, because some lapack calls, require output space smaller than -input space, like underdetermined gels. -*/ -static THTensor *THTensor_(checkLapackClone)(THTensor *result, THTensor *src, int nrows) -{ - /* check if user wants to reuse src and if it is correct shape/size */ - if (src == result && THTensor_(isTransposedContiguous)(src) && src->size[1] == nrows) - THTensor_(retain)(result); - else if(src == result || result == NULL) /* in this case, user wants reuse of src, but its structure is not OK */ - result = THTensor_(new)(); - else - THTensor_(retain)(result); - return result; -} - -/* -Same as cloneColumnMajor, but accepts nrows argument, because some lapack calls require -the resulting tensor to be larger than src. -*/ -static THTensor *THTensor_(cloneColumnMajorNrows)(THTensor *self, THTensor *src, int nrows) -{ - THTensor *result; - THTensor *view; - - if (src == NULL) - src = self; - result = THTensor_(checkLapackClone)(self, src, nrows); - if (src == result) - return result; - - THTensor_(resize2d)(result, src->size[1], nrows); - THTensor_(checkTransposed)(result); - - if (src->size[0] == nrows) - THTensor_(copy)(result, src); - else - { - view = THTensor_(newNarrow)(result, 0, 0, src->size[0]); - THTensor_(copy)(view, src); - THTensor_(free)(view); - } - return result; -} - -/* -Create a clone of src in self column major order for use with Lapack. 
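-(Column major means Fortran storage order, which is what the LAPACK calls
-below expect: for a 2x3 matrix the elements are laid out as
-a11 a21 a12 a22 a13 a23, i.e. the row index varies fastest.)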
-If src == self, a new tensor is allocated, in any case, the return tensor should be -freed by calling function. -*/ -static THTensor *THTensor_(cloneColumnMajor)(THTensor *self, THTensor *src) -{ - return THTensor_(cloneColumnMajorNrows)(self, src, src->size[0]); -} - -void THTensor_(gesv)(THTensor *rb_, THTensor *ra_, THTensor *b, THTensor *a) -{ - int free_b = 0; - if (a == NULL) a = ra_; - if (b == NULL) b = rb_; - THArgCheck(a->nDimension == 2, 2, "A should have 2 dimensions, but has %d", - a->nDimension); - THArgCheck(b->nDimension == 1 || b->nDimension == 2, 1, "B should have 1 or 2 " - "dimensions, but has %d", b->nDimension); - THArgCheck(a->size[0] == a->size[1], 2, "A should be square, but is %ldx%ld", - a->size[0], a->size[1]); - THArgCheck(a->size[0] == b->size[0], 2, "A,B size incompatible - A has %ld " - "rows, B has %ld", a->size[0], b->size[0]); - - if (b->nDimension == 1) { - b = THTensor_(newWithStorage2d)(b->storage, b->storageOffset, b->size[0], - b->stride[0], 1, 0); - free_b = 1; - } - - int n, nrhs, lda, ldb, info; - THIntTensor *ipiv; - THTensor *ra__; // working version of A matrix to be passed into lapack GELS - THTensor *rb__; // working version of B matrix to be passed into lapack GELS - - ra__ = THTensor_(cloneColumnMajor)(ra_, a); - rb__ = THTensor_(cloneColumnMajor)(rb_, b); - - n = (int)ra__->size[0]; - nrhs = (int)rb__->size[1]; - lda = n; - ldb = n; - - ipiv = THIntTensor_newWithSize1d((long)n); - THLapack_(gesv)(n, nrhs, - THTensor_(data)(ra__), lda, THIntTensor_data(ipiv), - THTensor_(data)(rb__), ldb, &info); - - THLapackCheckWithCleanup("Lapack Error in %s : U(%d,%d) is zero, singular U.", - THCleanup( - THTensor_(free)(ra__); - THTensor_(free)(rb__); - THIntTensor_free(ipiv); - if (free_b) THTensor_(free)(b);), - "gesv", info, info); - - THTensor_(freeCopyTo)(ra__, ra_); - THTensor_(freeCopyTo)(rb__, rb_); - THIntTensor_free(ipiv); - if (free_b) THTensor_(free)(b); -} - -void THTensor_(trtrs)(THTensor *rb_, THTensor *ra_, THTensor *b, THTensor *a, - const char *uplo, const char *trans, const char *diag) -{ - int free_b = 0; - if (a == NULL) a = ra_; - if (b == NULL) b = rb_; - THArgCheck(a->nDimension == 2, 2, "A should have 2 dimensions, but has %d", - a->nDimension); - THArgCheck(b->nDimension == 1 || b->nDimension == 2, 1, "B should have 1 or 2 " - "dimensions, but has %d", b->nDimension); - THArgCheck(a->size[0] == a->size[1], 2, "A should be square, but is %ldx%ld", - a->size[0], a->size[1]); - THArgCheck(a->size[0] == b->size[0], 2, "A,B size incompatible - A has %ld " - "rows, B has %ld", a->size[0], b->size[0]); - - if (b->nDimension == 1) { - b = THTensor_(newWithStorage2d)(b->storage, b->storageOffset, b->size[0], - b->stride[0], 1, 0); - free_b = 1; - } - - int n, nrhs, lda, ldb, info; - THTensor *ra__; // working version of A matrix to be passed into lapack TRTRS - THTensor *rb__; // working version of B matrix to be passed into lapack TRTRS - - ra__ = THTensor_(cloneColumnMajor)(ra_, a); - rb__ = THTensor_(cloneColumnMajor)(rb_, b); - - n = (int)ra__->size[0]; - nrhs = (int)rb__->size[1]; - lda = n; - ldb = n; - - THLapack_(trtrs)(uplo[0], trans[0], diag[0], n, nrhs, - THTensor_(data)(ra__), lda, - THTensor_(data)(rb__), ldb, &info); - - - THLapackCheckWithCleanup("Lapack Error in %s : A(%d,%d) is zero, singular A", - THCleanup( - THTensor_(free)(ra__); - THTensor_(free)(rb__); - if (free_b) THTensor_(free)(b);), - "trtrs", info, info); - - THTensor_(freeCopyTo)(ra__, ra_); - THTensor_(freeCopyTo)(rb__, rb_); - if (free_b) 
THTensor_(free)(b); -} - -void THTensor_(gels)(THTensor *rb_, THTensor *ra_, THTensor *b, THTensor *a) -{ - int free_b = 0; - // Note that a = NULL is interpreted as a = ra_, and b = NULL as b = rb_. - if (a == NULL) a = ra_; - if (b == NULL) b = rb_; - THArgCheck(a->nDimension == 2, 2, "A should have 2 dimensions, but has %d", - a->nDimension); - THArgCheck(b->nDimension == 1 || b->nDimension == 2, 1, "B should have 1 or 2 " - "dimensions, but has %d", b->nDimension); - THArgCheck(a->size[0] == b->size[0], 2, "A,B size incompatible - A has %ld " - "rows, B has %ld", a->size[0], b->size[0]); - - if (b->nDimension == 1) { - b = THTensor_(newWithStorage2d)(b->storage, b->storageOffset, b->size[0], - b->stride[0], 1, 0); - free_b = 1; - } - - int m, n, nrhs, lda, ldb, info, lwork; - THTensor *work = NULL; - real wkopt = 0; - - THTensor *ra__ = NULL; // working version of A matrix to be passed into lapack GELS - THTensor *rb__ = NULL; // working version of B matrix to be passed into lapack GELS - - ra__ = THTensor_(cloneColumnMajor)(ra_, a); - - m = ra__->size[0]; - n = ra__->size[1]; - lda = m; - ldb = (m > n) ? m : n; - - rb__ = THTensor_(cloneColumnMajorNrows)(rb_, b, ldb); - - nrhs = rb__->size[1]; - info = 0; - - - /* get optimal workspace size */ - THLapack_(gels)('N', m, n, nrhs, THTensor_(data)(ra__), lda, - THTensor_(data)(rb__), ldb, - &wkopt, -1, &info); - lwork = (int)wkopt; - work = THTensor_(newWithSize1d)(lwork); - THLapack_(gels)('N', m, n, nrhs, THTensor_(data)(ra__), lda, - THTensor_(data)(rb__), ldb, - THTensor_(data)(work), lwork, &info); - - THLapackCheckWithCleanup("Lapack Error in %s : The %d-th diagonal element of the triangular factor of A is zero", - THCleanup(THTensor_(free)(ra__); - THTensor_(free)(rb__); - THTensor_(free)(work); - if (free_b) THTensor_(free)(b);), - "gels", info,""); - - /* rb__ is currently ldb by nrhs; resize it to n by nrhs */ - rb__->size[0] = n; - if (rb__ != rb_) - THTensor_(resize2d)(rb_, n, nrhs); - - THTensor_(freeCopyTo)(ra__, ra_); - THTensor_(freeCopyTo)(rb__, rb_); - THTensor_(free)(work); - if (free_b) THTensor_(free)(b); -} - -void THTensor_(geev)(THTensor *re_, THTensor *rv_, THTensor *a_, const char *jobvr) -{ - int n, lda, lwork, info, ldvr; - THTensor *work, *wi, *wr, *a; - real wkopt; - real *rv_data; - long i; - - THTensor *re__ = NULL; - THTensor *rv__ = NULL; - - THArgCheck(a_->nDimension == 2, 1, "A should be 2 dimensional"); - THArgCheck(a_->size[0] == a_->size[1], 1,"A should be square"); - - /* we want to definitely clone a_ for geev*/ - a = THTensor_(cloneColumnMajor)(NULL, a_); - - n = a->size[0]; - lda = n; - - wi = THTensor_(newWithSize1d)(n); - wr = THTensor_(newWithSize1d)(n); - - rv_data = NULL; - ldvr = 1; - if (*jobvr == 'V') - { - THTensor_(resize2d)(rv_,n,n); - /* guard against someone passing a correct size, but wrong stride */ - rv__ = THTensor_(newTransposedContiguous)(rv_); - rv_data = THTensor_(data)(rv__); - ldvr = n; - } - THTensor_(resize2d)(re_,n,2); - re__ = THTensor_(newContiguous)(re_); - - /* get optimal workspace size */ - THLapack_(geev)('N', jobvr[0], n, THTensor_(data)(a), lda, THTensor_(data)(wr), THTensor_(data)(wi), - NULL, 1, rv_data, ldvr, &wkopt, -1, &info); - - lwork = (int)wkopt; - work = THTensor_(newWithSize1d)(lwork); - - THLapack_(geev)('N', jobvr[0], n, THTensor_(data)(a), lda, THTensor_(data)(wr), THTensor_(data)(wi), - NULL, 1, rv_data, ldvr, THTensor_(data)(work), lwork, &info); - - THLapackCheckWithCleanup(" Lapack Error in %s : %d off-diagonal elements of an didn't converge to 
zero", - THCleanup(THTensor_(free)(re__); - THTensor_(free)(rv__); - THTensor_(free)(a); - THTensor_(free)(wi); - THTensor_(free)(wr); - THTensor_(free)(work);), - "geev", info,""); - - { - real *re_data = THTensor_(data)(re__); - real *wi_data = THTensor_(data)(wi); - real *wr_data = THTensor_(data)(wr); - for (i=0; inDimension == 2, 1, "A should be 2 dimensional"); - THArgCheck(a->size[0] == a->size[1], 1,"A should be square"); - - int n, lda, lwork, info; - THTensor *work; - real wkopt; - - THTensor *rv__ = NULL; - THTensor *re__ = NULL; - - rv__ = THTensor_(cloneColumnMajor)(rv_, a); - - n = rv__->size[0]; - lda = n; - - THTensor_(resize1d)(re_,n); - re__ = THTensor_(newContiguous)(re_); - - /* get optimal workspace size */ - THLapack_(syev)(jobz[0], uplo[0], n, THTensor_(data)(rv__), lda, - THTensor_(data)(re_), &wkopt, -1, &info); - lwork = (int)wkopt; - work = THTensor_(newWithSize1d)(lwork); - THLapack_(syev)(jobz[0], uplo[0], n, THTensor_(data)(rv__), lda, - THTensor_(data)(re_), THTensor_(data)(work), lwork, &info); - - THLapackCheckWithCleanup("Lapack Error %s : %d off-diagonal elements didn't converge to zero", - THCleanup(THTensor_(free)(rv__); - THTensor_(free)(re__); - THTensor_(free)(work);), - "syev", info,""); - - THTensor_(freeCopyTo)(rv__, rv_); - THTensor_(freeCopyTo)(re__, re_); - THTensor_(free)(work); -} - -void THTensor_(gesvd)(THTensor *ru_, THTensor *rs_, THTensor *rv_, THTensor *a, const char* jobu) -{ - THTensor *ra_ = THTensor_(new)(); - THTensor_(gesvd2)(ru_, rs_, rv_, ra_, a, jobu); - THTensor_(free)(ra_); -} - -void THTensor_(gesvd2)(THTensor *ru_, THTensor *rs_, THTensor *rv_, THTensor *ra_, THTensor *a, const char* jobu) -{ - if (a == NULL) a = ra_; - THArgCheck(a->nDimension == 2, 1, "A should be 2 dimensional"); - - int k,m, n, lda, ldu, ldvt, lwork, info; - THTensor *work; - THTensor *rvf_ = THTensor_(new)(); - real wkopt; - - THTensor *ra__ = NULL; - THTensor *ru__ = NULL; - THTensor *rs__ = NULL; - THTensor *rv__ = NULL; - - ra__ = THTensor_(cloneColumnMajor)(ra_, a); - - m = ra__->size[0]; - n = ra__->size[1]; - k = (m < n ? 
-void THTensor_(gesvd2)(THTensor *ru_, THTensor *rs_, THTensor *rv_, THTensor *ra_, THTensor *a, const char* jobu)
-{
-  if (a == NULL) a = ra_;
-  THArgCheck(a->nDimension == 2, 1, "A should be 2 dimensional");
-
-  int k,m, n, lda, ldu, ldvt, lwork, info;
-  THTensor *work;
-  THTensor *rvf_ = THTensor_(new)();
-  real wkopt;
-
-  THTensor *ra__ = NULL;
-  THTensor *ru__ = NULL;
-  THTensor *rs__ = NULL;
-  THTensor *rv__ = NULL;
-
-  ra__ = THTensor_(cloneColumnMajor)(ra_, a);
-
-  m = ra__->size[0];
-  n = ra__->size[1];
-  k = (m < n ? m : n);
-
-  lda = m;
-  ldu = m;
-  ldvt = n;
-
-  THTensor_(resize1d)(rs_,k);
-  THTensor_(resize2d)(rvf_,ldvt,n);
-  if (*jobu == 'A')
-    THTensor_(resize2d)(ru_,m,ldu);
-  else
-    THTensor_(resize2d)(ru_,k,ldu);
-
-  THTensor_(checkTransposed)(ru_);
-
-  /* guard against someone passing a correct size, but wrong stride */
-  ru__ = THTensor_(newTransposedContiguous)(ru_);
-  rs__ = THTensor_(newContiguous)(rs_);
-  rv__ = THTensor_(newContiguous)(rvf_);
-
-  THLapack_(gesvd)(jobu[0],jobu[0],
-                   m,n,THTensor_(data)(ra__),lda,
-                   THTensor_(data)(rs__),
-                   THTensor_(data)(ru__),
-                   ldu,
-                   THTensor_(data)(rv__), ldvt,
-                   &wkopt, -1, &info);
-  lwork = (int)wkopt;
-  work = THTensor_(newWithSize1d)(lwork);
-  THLapack_(gesvd)(jobu[0],jobu[0],
-                   m,n,THTensor_(data)(ra__),lda,
-                   THTensor_(data)(rs__),
-                   THTensor_(data)(ru__),
-                   ldu,
-                   THTensor_(data)(rv__), ldvt,
-                   THTensor_(data)(work),lwork, &info);
-
-  THLapackCheckWithCleanup(" Lapack Error %s : %d superdiagonals failed to converge.",
-                           THCleanup(
-                               THTensor_(free)(ru__);
-                               THTensor_(free)(rs__);
-                               THTensor_(free)(rv__);
-                               THTensor_(free)(ra__);
-                               THTensor_(free)(work);),
-                           "gesvd", info,"");
-
-  if (*jobu == 'S')
-    THTensor_(narrow)(rv__,NULL,1,0,k);
-
-  THTensor_(freeCopyTo)(ru__, ru_);
-  THTensor_(freeCopyTo)(rs__, rs_);
-  THTensor_(freeCopyTo)(rv__, rvf_);
-  THTensor_(freeCopyTo)(ra__, ra_);
-  THTensor_(free)(work);
-
-  if (*jobu == 'S') {
-    THTensor_(narrow)(rvf_,NULL,1,0,k);
-  }
-  THTensor_(resizeAs)(rv_, rvf_);
-  THTensor_(copy)(rv_, rvf_);
-  THTensor_(free)(rvf_);
-}
-
-void THTensor_(getri)(THTensor *ra_, THTensor *a)
-{
-  if (a == NULL) a = ra_;
-  THArgCheck(a->nDimension == 2, 1, "A should be 2 dimensional");
-  THArgCheck(a->size[0] == a->size[1], 1, "A should be square");
-
-  int m, n, lda, info, lwork;
-  real wkopt;
-  THIntTensor *ipiv;
-  THTensor *work;
-  THTensor *ra__ = NULL;
-
-  ra__ = THTensor_(cloneColumnMajor)(ra_, a);
-
-  m = ra__->size[0];
-  n = ra__->size[1];
-  lda = m;
-  ipiv = THIntTensor_newWithSize1d((long)m);
-
-  /* Run LU */
-  THLapack_(getrf)(n, n, THTensor_(data)(ra__), lda, THIntTensor_data(ipiv), &info);
-  THLapackCheckWithCleanup("Lapack Error %s : U(%d,%d) is 0, U is singular",
-                           THCleanup(
-                               THTensor_(free)(ra__);
-                               THIntTensor_free(ipiv);),
-                           "getrf", info, info);
-
-  /* Run inverse */
-  THLapack_(getri)(n, THTensor_(data)(ra__), lda, THIntTensor_data(ipiv), &wkopt, -1, &info);
-  lwork = (int)wkopt;
-  work = THTensor_(newWithSize1d)(lwork);
-  THLapack_(getri)(n, THTensor_(data)(ra__), lda, THIntTensor_data(ipiv), THTensor_(data)(work), lwork, &info);
-  THLapackCheckWithCleanup("Lapack Error %s : U(%d,%d) is 0, U is singular",
-                           THCleanup(
-                               THTensor_(free)(ra__);
-                               THTensor_(free)(work);
-                               THIntTensor_free(ipiv);),
-                           "getri", info, info);
-
-  THTensor_(freeCopyTo)(ra__, ra_);
-  THTensor_(free)(work);
-  THIntTensor_free(ipiv);
-}
-
-void THTensor_(clearUpLoTriangle)(THTensor *a, const char *uplo)
-{
-  THArgCheck(a->nDimension == 2, 1, "A should be 2 dimensional");
-  THArgCheck(a->size[0] == a->size[1], 1, "A should be square");
-
-  int n = a->size[0];
-
-  /* Build full matrix */
-  real *p = THTensor_(data)(a);
-  long i, j;
-
-  /* Upper Triangular Case */
-  if (uplo[0] == 'U')
-  {
-    /* Clear lower triangle (excluding diagonals) */
-    for (i=0; i<n; i++) {
-      for (j=i+1; j<n; j++) {
-        p[n*i + j] = 0;
-      }
-    }
-  }
-  /* Lower Triangular Case */
-  else if (uplo[0] == 'L')
-  {
-    /* Clear upper triangle (excluding diagonals) */
-    for (i=0; i<n; i++) {
-      for (j=0; j<i; j++) {
-        p[n*i + j] = 0;
-      }
-    }
-  }
-}
-
-void THTensor_(copyUpLoTriangle)(THTensor *a, const char *uplo)
-{
-  THArgCheck(a->nDimension == 2, 1, "A should be 2 dimensional");
-  THArgCheck(a->size[0] == a->size[1], 1, "A should be square");
-
-  int n = a->size[0];
-
-  /* Build full matrix */
-  real *p = THTensor_(data)(a);
-  long i, j;
-
-  /* Upper Triangular Case */
-  if (uplo[0] == 'U')
-  {
-    /* Copy upper triangle into the lower triangle */
-    for (i=0; i<n; i++) {
-      for (j=i+1; j<n; j++) {
-        p[n*i + j] = p[n*j + i];
-      }
-    }
-  }
-  /* Lower Triangular Case */
-  else if (uplo[0] == 'L')
-  {
-    /* Copy lower triangle into the upper triangle */
-    for (i=0; i<n; i++) {
-      for (j=0; j<i; j++) {
-        p[n*i + j] = p[n*j + i];
-      }
-    }
-  }
-}
-
-void THTensor_(potrf)(THTensor *ra_, THTensor *a, const char *uplo)
-{
-  if (a == NULL) a = ra_;
-  THArgCheck(a->nDimension == 2, 1, "A should be 2 dimensional");
-  THArgCheck(a->size[0] == a->size[1], 1, "A should be square");
-
-  int n, lda, info;
-  THTensor *ra__ = NULL;
-
-  ra__ = THTensor_(cloneColumnMajor)(ra_, a);
-
-  n = ra__->size[0];
-  lda = n;
-
-  /* Run Factorization */
-  THLapack_(potrf)(uplo[0], n, THTensor_(data)(ra__), lda, &info);
-  THLapackCheckWithCleanup("Lapack Error in %s : the leading minor of order %d is not positive definite",
-                           THCleanup(THTensor_(free)(ra__);),
-                           "potrf", info, "");
-
-  THTensor_(clearUpLoTriangle)(ra__, uplo);
-  THTensor_(freeCopyTo)(ra__, ra_);
-}
-
-void THTensor_(potrs)(THTensor *rb_, THTensor *b, THTensor *a, const char *uplo)
-{
-  int free_b = 0;
-  if (b == NULL) b = rb_;
-
-  THArgCheck(a->nDimension == 2, 2, "A should have 2 dimensions, but has %d",
-      a->nDimension);
-  THArgCheck(b->nDimension == 1 || b->nDimension == 2, 1, "B should have 1 or 2 "
-      "dimensions, but has %d", b->nDimension);
-  THArgCheck(a->size[0] == a->size[1], 2, "A should be square, but is %ldx%ld",
-      a->size[0], a->size[1]);
-  THArgCheck(a->size[0] == b->size[0], 2, "A,B size incompatible - A has %ld "
-      "rows, B has %ld", a->size[0], b->size[0]);
-
-  if (b->nDimension == 1) {
-    b = THTensor_(newWithStorage2d)(b->storage, b->storageOffset, b->size[0],
-            b->stride[0], 1, 0);
-    free_b = 1;
-  }
-
-  int n, nrhs, lda, ldb, info;
-  THTensor *ra__; // working version of A matrix to be passed into lapack TRTRS
-  THTensor *rb__; // working version of B matrix to be passed into lapack TRTRS
-
-  ra__ = THTensor_(cloneColumnMajor)(NULL, a);
-  rb__ = THTensor_(cloneColumnMajor)(rb_, b);
-
-  n = (int)ra__->size[0];
-  nrhs = (int)rb__->size[1];
-  lda = n;
-  ldb = n;
-
-  THLapack_(potrs)(uplo[0], n, nrhs, THTensor_(data)(ra__),
-                   lda, THTensor_(data)(rb__), ldb, &info);
-
-
-  THLapackCheckWithCleanup("Lapack Error in %s : A(%d,%d) is zero, singular A",
-                           THCleanup(
-                               THTensor_(free)(ra__);
-                               THTensor_(free)(rb__);
-                               if (free_b) THTensor_(free)(b);),
-                           "potrs", info, info);
-
-  if (free_b) THTensor_(free)(b);
-  THTensor_(free)(ra__);
-  THTensor_(freeCopyTo)(rb__, rb_);
-}
-
-void THTensor_(potri)(THTensor *ra_, THTensor *a, const char *uplo)
-{
-  if (a == NULL) a = ra_;
-  THArgCheck(a->nDimension == 2, 1, "A should be 2 dimensional");
-  THArgCheck(a->size[0] == a->size[1], 1, "A should be square");
-
-  int n, lda, info;
-  THTensor *ra__ = NULL;
-
-  ra__ = THTensor_(cloneColumnMajor)(ra_, a);
-
-  n = ra__->size[0];
-  lda = n;
-
-  /* Run inverse */
-  THLapack_(potri)(uplo[0], n, THTensor_(data)(ra__), lda, &info);
-  THLapackCheckWithCleanup("Lapack Error %s : A(%d,%d) is 0, A cannot be factorized",
-                           THCleanup(THTensor_(free)(ra__);),
-                           "potri", info, info);
-
-  THTensor_(copyUpLoTriangle)(ra__, uplo);
-  THTensor_(freeCopyTo)(ra__, ra_);
-}
-
-/*
-  Computes the Cholesky factorization with complete pivoting of a real symmetric
-  positive semidefinite matrix.
-
-  Args:
-  * `ra_`   - result Tensor in which to store the factor U or L from the
-              Cholesky factorization.
-  * `rpiv_` - result IntTensor containing sparse permutation matrix P, encoded
-              as P[rpiv_[k], k] = 1.
-  * `a`     - input Tensor; the input matrix to factorize.
-  * `uplo`  - string; specifies whether the upper or lower triangular part of
-              the symmetric matrix A is stored. "U"/"L" for upper/lower
-              triangular.
-  * `tol`   - double; user defined tolerance, or < 0 for automatic choice.
-              The algorithm terminates when the pivot <= tol.
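-  * (Editorial note: the numerical rank that LAPACK's ?pstrf reports is kept
-    in a local variable `rank` below and is never surfaced to the caller.)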
- */
-void THTensor_(pstrf)(THTensor *ra_, THIntTensor *rpiv_, THTensor *a, const char *uplo, real tol) {
-  THArgCheck(a->nDimension == 2, 1, "A should be 2 dimensional");
-  THArgCheck(a->size[0] == a->size[1], 1, "A should be square");
-
-  int n = a->size[0];
-
-  THTensor *ra__ = THTensor_(cloneColumnMajor)(ra_, a);
-  THIntTensor_resize1d(rpiv_, n);
-
-  // Allocate working tensor
-  THTensor *work = THTensor_(newWithSize1d)(2 * n);
-
-  // Run Cholesky factorization
-  int lda = n;
-  int rank, info;
-
-  THLapack_(pstrf)(uplo[0], n, THTensor_(data)(ra__), lda,
-                   THIntTensor_data(rpiv_), &rank, tol,
-                   THTensor_(data)(work), &info);
-
-  THLapackCheckWithCleanup("Lapack Error %s : matrix is rank deficient or not positive semidefinite",
-                           THCleanup(
-                               THTensor_(free)(ra__);
-                               THTensor_(free)(work);),
-                           "pstrf", info,"");
-
-  THTensor_(clearUpLoTriangle)(ra__, uplo);
-
-  THTensor_(freeCopyTo)(ra__, ra_);
-  THTensor_(free)(work);
-}
-
-/*
-  Perform a QR decomposition of a matrix.
-
-  In LAPACK, two parts of the QR decomposition are implemented as two separate
-  functions: geqrf and orgqr. For flexibility and efficiency, these are wrapped
-  directly, below - but to make the common usage convenient, we also provide
-  this function, which calls them both and returns the results in a more
-  intuitive form.
-
-  Args:
-  * `rq_` - result Tensor in which to store the Q part of the decomposition.
-  * `rr_` - result Tensor in which to store the R part of the decomposition.
-  * `a`   - input Tensor; the matrix to decompose.
-
-*/
-void THTensor_(qr)(THTensor *rq_, THTensor *rr_, THTensor *a)
-{
-  int m = a->size[0];
-  int n = a->size[1];
-  int k = (m < n ? m : n);
-  THTensor *ra_ = THTensor_(new)();
-  THTensor *rtau_ = THTensor_(new)();
-  THTensor *rr__ = THTensor_(new)();
-  THTensor_(geqrf)(ra_, rtau_, a);
-  THTensor_(resize2d)(rr__, k, ra_->size[1]);
-  THTensor_(narrow)(rr__, ra_, 0, 0, k);
-  THTensor_(triu)(rr_, rr__, 0);
-  THTensor_(resize2d)(rq_, ra_->size[0], k);
-  THTensor_(orgqr)(rq_, ra_, rtau_);
-  THTensor_(narrow)(rq_, rq_, 1, 0, k);
-  THTensor_(free)(ra_);
-  THTensor_(free)(rtau_);
-  THTensor_(free)(rr__);
-}
-
-/*
-  The geqrf function does the main work of QR-decomposing a matrix.
-  However, rather than producing a Q matrix directly, it produces a sequence of
-  elementary reflectors which may later be composed to construct Q - for example
-  with the orgqr function, below.
-
-  Args:
-  * `ra_`   - Result matrix which will contain:
-              i)  The elements of R, on and above the diagonal.
-              ii) Directions of the reflectors implicitly defining Q.
-  * `rtau_` - Result tensor which will contain the magnitudes of the reflectors
-              implicitly defining Q.
-  * `a`     - Input matrix, to decompose. If NULL, `ra_` is used as input.
-
-  For further details, please see the LAPACK documentation.
-
-*/
-void THTensor_(geqrf)(THTensor *ra_, THTensor *rtau_, THTensor *a)
-{
-  if (a == NULL) a = ra_;
-  THArgCheck(a->nDimension == 2, 1, "A should be 2 dimensional");
-
-  THTensor *ra__ = NULL;
-
-  /* Prepare the input for LAPACK, making a copy if necessary. */
-  ra__ = THTensor_(cloneColumnMajor)(ra_, a);
-
-  int m = ra__->size[0];
-  int n = ra__->size[1];
-  int k = (m < n ? m : n);
-  int lda = m;
-  THTensor_(resize1d)(rtau_, k);
-
-  /* Dry-run to query the suggested size of the workspace. */
-  int info = 0;
-  real wkopt = 0;
-  THLapack_(geqrf)(m, n, THTensor_(data)(ra__), lda,
-                   THTensor_(data)(rtau_),
-                   &wkopt, -1, &info);
-
-  /* Allocate the workspace and call LAPACK to do the real work.
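-     (This dry-run/allocate/call sequence is the standard LAPACK workspace
-     query: passing lwork == -1 makes the routine report its optimal
-     workspace size in wkopt instead of computing anything.)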
*/ - int lwork = (int)wkopt; - THTensor *work = THTensor_(newWithSize1d)(lwork); - THLapack_(geqrf)(m, n, THTensor_(data)(ra__), lda, - THTensor_(data)(rtau_), - THTensor_(data)(work), lwork, &info); - - THLapackCheckWithCleanup("Lapack Error %s : unknown Lapack error. info = %i", - THCleanup( - THTensor_(free)(ra__); - THTensor_(free)(work);), - "geqrf", info,""); - - THTensor_(freeCopyTo)(ra__, ra_); - THTensor_(free)(work); -} - -/* - The orgqr function allows reconstruction of a matrix Q with orthogonal - columns, from a sequence of elementary reflectors, such as is produced by the - geqrf function. - - Args: - * `ra_` - result Tensor, which will contain the matrix Q. - * `a` - input Tensor, which should be a matrix with the directions of the - elementary reflectors below the diagonal. If NULL, `ra_` is used as - input. - * `tau` - input Tensor, containing the magnitudes of the elementary - reflectors. - - For further details, please see the LAPACK documentation. - -*/ -void THTensor_(orgqr)(THTensor *ra_, THTensor *a, THTensor *tau) -{ - if (a == NULL) a = ra_; - THArgCheck(a->nDimension == 2, 1, "A should be 2 dimensional"); - - THTensor *ra__ = NULL; - ra__ = THTensor_(cloneColumnMajor)(ra_, a); - - int m = ra__->size[0]; - int n = ra__->size[1]; - int k = tau->size[0]; - int lda = m; - - /* Dry-run to query the suggested size of the workspace. */ - int info = 0; - real wkopt = 0; - THLapack_(orgqr)(m, k, k, THTensor_(data)(ra__), lda, - THTensor_(data)(tau), - &wkopt, -1, &info); - - /* Allocate the workspace and call LAPACK to do the real work. */ - int lwork = (int)wkopt; - THTensor *work = THTensor_(newWithSize1d)(lwork); - THLapack_(orgqr)(m, k, k, THTensor_(data)(ra__), lda, - THTensor_(data)(tau), - THTensor_(data)(work), lwork, &info); - - THLapackCheckWithCleanup(" Lapack Error %s : unknown Lapack error. info = %i", - THCleanup( - THTensor_(free)(ra__); - THTensor_(free)(work);), - "orgqr", info,""); - THTensor_(freeCopyTo)(ra__, ra_); - THTensor_(free)(work); -} - -/* - The ormqr function multiplies Q with another matrix from a sequence of - elementary reflectors, such as is produced by the geqrf function. - - Args: - * `ra_` - result Tensor, which will contain the matrix Q' c. - * `a` - input Tensor, which should be a matrix with the directions of the - elementary reflectors below the diagonal. If NULL, `ra_` is used as - input. - * `tau` - input Tensor, containing the magnitudes of the elementary - reflectors. - * `c` - input Tensor, containing the matrix to be multiplied. - * `side` - char, determining whether c is left- or right-multiplied with Q. - * `trans` - char, determining whether to transpose Q before multiplying. - - For further details, please see the LAPACK documentation. - -*/ -void THTensor_(ormqr)(THTensor *ra_, THTensor *a, THTensor *tau, THTensor *c, const char *side, const char *trans) -{ - if (a == NULL) a = ra_; - THArgCheck(a->nDimension == 2, 1, "A should be 2 dimensional"); - - THTensor *ra__ = NULL; - ra__ = THTensor_(cloneColumnMajor)(ra_, c); - - int m = c->size[0]; - int n = c->size[1]; - int k = tau->size[0]; - int lda; - if (*side == 'L') - { - lda = m; - } - else - { - lda = n; - } - int ldc = m; - - /* Dry-run to query the suggested size of the workspace. */ - int info = 0; - real wkopt = 0; - THLapack_(ormqr)(side[0], trans[0], m, n, k, THTensor_(data)(a), lda, - THTensor_(data)(tau), THTensor_(data)(ra__), ldc, - &wkopt, -1, &info); - - /* Allocate the workspace and call LAPACK to do the real work. 
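-     (Editorial note: ?ormqr applies the implicit Q from geqrf directly via
-     its stored reflectors, which is generally cheaper than materializing Q
-     with orgqr and then multiplying.)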
*/ - int lwork = (int)wkopt; - THTensor *work = THTensor_(newWithSize1d)(lwork); - THLapack_(ormqr)(side[0], trans[0], m, n, k, THTensor_(data)(a), lda, - THTensor_(data)(tau), THTensor_(data)(ra__), ldc, - THTensor_(data)(work), lwork, &info); - - THLapackCheckWithCleanup(" Lapack Error %s : unknown Lapack error. info = %i", - THCleanup( - THTensor_(free)(ra__); - THTensor_(free)(work);), - "ormqr", info,""); - THTensor_(freeCopyTo)(ra__, ra_); - THTensor_(free)(work); -} - -void THTensor_(btrifact)(THTensor *ra_, THIntTensor *rpivots_, THIntTensor *rinfo_, int pivot, THTensor *a) -{ - THArgCheck(THTensor_(nDimension)(a) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(a)); - if (!pivot) { - THError("btrifact without pivoting is not implemented on the CPU"); - } - - if (ra_ != a) { - THTensor_(resizeAs)(ra_, a); - THTensor_(copy)(ra_, a); - } - - int m = a->size[1]; - int n = a->size[2]; - if (m != n) { - THError("btrifact is only implemented for square matrices"); - } - long num_batches = THTensor_(size)(a, 0); - THTensor *ra__; - int lda; - - if (ra_->stride[1] == 1) { - // column ordered, what BLAS wants - lda = ra_->stride[2]; - ra__ = ra_; - } else { - // not column ordered, need to make it such (requires copy) - THTensor *transp_r_ = THTensor_(newTranspose)(ra_, 1, 2); - ra__ = THTensor_(newClone)(transp_r_); - THTensor_(free)(transp_r_); - THTensor_(transpose)(ra__, NULL, 1, 2); - lda = ra__->stride[2]; - } - - THTensor *ai = THTensor_(new)(); - THTensor *rai = THTensor_(new)(); - THIntTensor *rpivoti = THIntTensor_new(); - - int info = 0; - int *info_ptr = &info; - if (rinfo_) { - THIntTensor_resize1d(rinfo_, num_batches); - info_ptr = THIntTensor_data(rinfo_); - } - - THIntTensor_resize2d(rpivots_, num_batches, n); - - long batch = 0; - for (; batch < num_batches; ++batch) { - THTensor_(select)(ai, a, 0, batch); - THTensor_(select)(rai, ra__, 0, batch); - THIntTensor_select(rpivoti, rpivots_, 0, batch); - - THLapack_(getrf)(n, n, THTensor_(data)(rai), lda, - THIntTensor_data(rpivoti), info_ptr); - if (rinfo_) { - info_ptr++; - } else if (info != 0) { - break; - } - } - - THTensor_(free)(ai); - THTensor_(free)(rai); - THIntTensor_free(rpivoti); - - if (ra__ != ra_) { - THTensor_(freeCopyTo)(ra__, ra_); - } - - if (!rinfo_ && info != 0) { - THError("failed to factorize batch element %ld (info == %d)", batch, info); - } -} - -void THTensor_(btrisolve)(THTensor *rb_, THTensor *b, THTensor *atf, THIntTensor *pivots) -{ - THArgCheck(THTensor_(nDimension)(atf) == 3, 1, "expected 3D tensor, got %dD", - THTensor_(nDimension)(atf)); - THArgCheck(THTensor_(nDimension)(b) == 3 || - THTensor_(nDimension)(b) == 2, 4, "expected 2D or 3D tensor"); - THArgCheck(THTensor_(size)(atf, 0) == - THTensor_(size)(b, 0), 3, "number of batches must be equal"); - THArgCheck(THTensor_(size)(atf, 1) == - THTensor_(size)(atf, 2), 3, "A matrices must be square"); - THArgCheck(THTensor_(size)(atf, 1) == - THTensor_(size)(b, 1), 3, "dimensions of A and b must be equal"); - - if (rb_ != b) { - THTensor_(resizeAs)(rb_, b); - THTensor_(copy)(rb_, b); - } - - long num_batches = atf->size[0]; - long n = atf->size[1]; - int nrhs = rb_->nDimension > 2 ? 
rb_->size[2] : 1; - - int lda, ldb; - THTensor *atf_; - THTensor *rb__; - - // correct ordering of A - if (atf->stride[1] == 1) { - // column ordered, what BLAS wants - lda = atf->stride[2]; - atf_ = atf; - } else { - // not column ordered, need to make it such (requires copy) - // it would be nice if we could use the op(A) flags to automatically - // transpose A if needed, but this leads to unpredictable behavior if the - // user clones A_tf later with a different ordering - THTensor *transp_r_ = THTensor_(newTranspose)(atf, 1, 2); - atf_ = THTensor_(newClone)(transp_r_); - THTensor_(free)(transp_r_); - THTensor_(transpose)(atf_, NULL, 1, 2); - lda = atf_->stride[2]; - } - - // correct ordering of B - if (rb_->stride[1] == 1) { - // column ordered - if (rb_->nDimension == 2 || rb_->size[2] == 1) { - ldb = n; - } else { - ldb = rb_->stride[2]; - } - rb__ = rb_; - } else { - // make column ordered - if (rb_->nDimension > 2) { - THTensor *transp_r_ = THTensor_(newTranspose)(rb_, 1, 2); - rb__ = THTensor_(newClone)(transp_r_); - THTensor_(free)(transp_r_); - THTensor_(transpose)(rb__, NULL, 1, 2); - ldb = rb__->stride[2]; - } else { - rb__ = THTensor_(newClone)(rb_); - ldb = n; - } - } - - THTensor *ai = THTensor_(new)(); - THTensor *rbi = THTensor_(new)(); - THIntTensor *pivoti = THIntTensor_new(); - - if (!THIntTensor_isContiguous(pivots)) { - THError("Error: rpivots_ is not contiguous."); - } - - for (long batch = 0; batch < num_batches; ++batch) { - THTensor_(select)(ai, atf_, 0, batch); - THTensor_(select)(rbi, rb__, 0, batch); - THIntTensor_select(pivoti, pivots, 0, batch); - -#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) - int info; - THLapack_(getrs)('N', n, nrhs, THTensor_(data)(ai), lda, - THIntTensor_data(pivoti), THTensor_(data)(rbi), - ldb, &info); - if (info != 0) { - THError("Error: Nonzero info."); - } -#else - THError("Unimplemented"); -#endif - } - - THTensor_(free)(ai); - THTensor_(free)(rbi); - THIntTensor_free(pivoti); - - if (atf_ != atf) { - THTensor_(free)(atf_); - } - - if (rb__ != rb_) { - THTensor_(freeCopyTo)(rb__, rb_); - } -} - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensorLapack.h b/contrib/lua-torch/torch7/lib/TH/generic/THTensorLapack.h deleted file mode 100644 index 8785943485..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THTensorLapack.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THTensorLapack.h" -#else - -TH_API void THTensor_(gesv)(THTensor *rb_, THTensor *ra_, THTensor *b_, THTensor *a_); -TH_API void THTensor_(trtrs)(THTensor *rb_, THTensor *ra_, THTensor *b_, THTensor *a_, const char *uplo, const char *trans, const char *diag); -TH_API void THTensor_(gels)(THTensor *rb_, THTensor *ra_, THTensor *b_, THTensor *a_); -TH_API void THTensor_(syev)(THTensor *re_, THTensor *rv_, THTensor *a_, const char *jobz, const char *uplo); -TH_API void THTensor_(geev)(THTensor *re_, THTensor *rv_, THTensor *a_, const char *jobvr); -TH_API void THTensor_(gesvd)(THTensor *ru_, THTensor *rs_, THTensor *rv_, THTensor *a, const char *jobu); -TH_API void THTensor_(gesvd2)(THTensor *ru_, THTensor *rs_, THTensor *rv_, THTensor *ra_, THTensor *a, const char *jobu); -TH_API void THTensor_(getri)(THTensor *ra_, THTensor *a); -TH_API void THTensor_(potrf)(THTensor *ra_, THTensor *a, const char *uplo); -TH_API void THTensor_(potrs)(THTensor *rb_, THTensor *b_, THTensor *a_, const char *uplo); -TH_API void THTensor_(potri)(THTensor *ra_, THTensor *a, const char *uplo); -TH_API void 
THTensor_(qr)(THTensor *rq_, THTensor *rr_, THTensor *a);
-TH_API void THTensor_(geqrf)(THTensor *ra_, THTensor *rtau_, THTensor *a);
-TH_API void THTensor_(orgqr)(THTensor *ra_, THTensor *a, THTensor *tau);
-TH_API void THTensor_(ormqr)(THTensor *ra_, THTensor *a, THTensor *tau, THTensor *c, const char *side, const char *trans);
-TH_API void THTensor_(pstrf)(THTensor *ra_, THIntTensor *rpiv_, THTensor *a, const char *uplo, real tol);
-
-TH_API void THTensor_(btrifact)(THTensor *ra_, THIntTensor *rpivots_, THIntTensor *rinfo_, int pivot, THTensor *a);
-TH_API void THTensor_(btrisolve)(THTensor *rb_, THTensor *b, THTensor *atf, THIntTensor *pivots);
-
-#endif
diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensorMath.c b/contrib/lua-torch/torch7/lib/TH/generic/THTensorMath.c
deleted file mode 100644
index db7a0cb190..0000000000
--- a/contrib/lua-torch/torch7/lib/TH/generic/THTensorMath.c
+++ /dev/null
@@ -1,3275 +0,0 @@
-#ifndef TH_GENERIC_FILE
-#define TH_GENERIC_FILE "generic/THTensorMath.c"
-#else
-
-#ifndef NAN
-  #define NAN (nan(NULL))
-#endif
-
-#ifdef _OPENMP
-#include <omp.h>
-#endif
-
-#define TH_OMP_OVERHEAD_THRESHOLD 100000
-
-#ifdef _OPENMP
-
-#ifndef _WIN32
-#define PRAGMA(P) _Pragma(#P)
-#else
-#define PRAGMA(P) __pragma(P)
-#endif
-
-#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
-{ \
-  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR); \
-  PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
-  { \
-    size_t num_threads = omp_get_num_threads(); \
-    size_t tid = omp_get_thread_num(); \
-    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
-    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
-      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
-    ptrdiff_t TENSOR##_len = TH_TENSOR_end - TH_TENSOR_offset; \
-    TYPE *TENSOR##_data = THTensor_(data)(TENSOR) + TH_TENSOR_offset; \
-    CODE \
-  } \
-}
-#else
-#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
-{ \
-  TYPE *TENSOR##_data = THTensor_(data)(TENSOR); \
-  ptrdiff_t TENSOR##_len = THTensor_(nElement)(TENSOR); \
-  CODE \
-}
-#endif
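
[Editor's note: the OpenMP branch above hand-rolls a static split of the
element range. A standalone sketch of the same computation; the name
`partition` is hypothetical, not a TH symbol:

    #include <stddef.h>

    /* Thread tid of nthreads gets [begin, end): blocks of size/nthreads
       elements, with the last thread absorbing the remainder - exactly
       the offsets TH_TENSOR_APPLY_CONTIG computes per thread. */
    static void partition(ptrdiff_t size, ptrdiff_t nthreads, ptrdiff_t tid,
                          ptrdiff_t *begin, ptrdiff_t *end)
    {
      *begin = tid * (size / nthreads);
      *end = (tid == nthreads - 1) ? size : *begin + size / nthreads;
    }
]
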
-
-#ifdef _OPENMP
-#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
-{ \
-  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
-  PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
-  { \
-    size_t num_threads = omp_get_num_threads(); \
-    size_t tid = omp_get_thread_num(); \
-    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
-    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
-      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
-    ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
-    TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
-    TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
-    CODE \
-  } \
-}
-#else
-#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
-{ \
-  TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
-  TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
-  ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
-  CODE \
-}
-#endif
-
-#ifdef _OPENMP
-#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
-{ \
-  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
-  PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
-  { \
-    size_t num_threads = omp_get_num_threads(); \
-    size_t tid = omp_get_thread_num(); \
-    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
-    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
-      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
-    ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
-    TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
-    TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
-    TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3) + TH_TENSOR_offset; \
-    CODE \
-  } \
-}
-#else
-#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
-{ \
-  TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
-  TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
-  TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3); \
-  ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
-  CODE \
-}
-#endif
-
-void THTensor_(fill)(THTensor *r_, real value)
-{
-  if (THTensor_(isContiguous)(r_) || THTensor_(isTransposed)(r_)) {
-    TH_TENSOR_APPLY_CONTIG(real, r_, THVector_(fill)(r__data, value, r__len););
-  } else {
-    TH_TENSOR_APPLY(real, r_,
-      if (r__stride == 1) {
-        THVector_(fill)(r__data, value, r__size);
-        r__i = r__size;
-        r__data += r__stride * r__size;
-        break;
-      } else {
-        *r__data = value;
-      }
-      );
-  }
-}
-
-void THTensor_(zero)(THTensor *r_)
-{
-  THTensor_(fill)(r_, 0);
-}
-
-void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value)
-{
-  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
-                   if (*mask_data > 1)
-                   {
-                     THFree(mask_counter);
-                     THFree(tensor_counter);
-                     THError("Mask tensor can take 0 and 1 values only");
-                   }
-                   else if (*mask_data == 1)
-                   {
-                     *tensor_data = value;
-                   });
-}
-
-void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor *src)
-{
-  THTensor *srct = THTensor_(newContiguous)(src);
-  real *src_data = THTensor_(data)(srct);
-  ptrdiff_t cntr = 0;
-  ptrdiff_t nelem = THTensor_(nElement)(srct);
-  if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask))
-  {
-    THTensor_(free)(srct);
-    THError("Number of elements of destination tensor != Number of elements in mask");
-  }
-  TH_TENSOR_APPLY2(real, tensor, unsigned char, mask,
-                   if (*mask_data > 1)
-                   {
-                     THTensor_(free)(srct);
-                     THFree(mask_counter);
-                     THFree(tensor_counter);
-                     THError("Mask tensor can take 0 and 1 values only");
-                   }
-                   else if (*mask_data == 1)
-                   {
-                     if (cntr == nelem)
-                     {
-                       THTensor_(free)(srct);
-                       THFree(mask_counter);
-                       THFree(tensor_counter);
-                       THError("Number of elements of src < number of ones in mask");
-                     }
-                     *tensor_data = *src_data;
-                     src_data++;
-                     cntr++;
-                   });
-  THTensor_(free)(srct);
-}
-
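
[Editor's note: an illustrative driver for the masked routines above,
assuming a double build (real == double) so the generic names instantiate
as THDoubleTensor_* / THByteTensor_*; not part of the removed file.]

    THDoubleTensor *t = THDoubleTensor_newWithSize1d(4);
    THByteTensor *m = THByteTensor_newWithSize1d(4);
    THDoubleTensor_fill(t, 7.0);
    THByteTensor_zero(m);
    THByteTensor_set1d(m, 2, 1);          /* mark element 2 only */
    THDoubleTensor_maskedFill(t, m, 0.0); /* t is now {7, 7, 0, 7} */
    /* any mask value other than 0 or 1 raises the THError above */
    THDoubleTensor_free(t);
    THByteTensor_free(m);
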
*tensor, THTensor *src, THByteTensor *mask) -{ - ptrdiff_t numel = THByteTensor_sumall(mask); - real *tensor_data; - -#ifdef DEBUG - THAssert(numel <= LONG_MAX); -#endif - THTensor_(resize1d)(tensor,numel); - tensor_data = THTensor_(data)(tensor); - TH_TENSOR_APPLY2(real, src, unsigned char, mask, - if (*mask_data > 1) - { - THFree(mask_counter); - THFree(src_counter); - THError("Mask tensor can take 0 and 1 values only"); - } - else if (*mask_data == 1) - { - *tensor_data = *src_data; - tensor_data++; - }); -} - -// Finds non-zero elements of a tensor and returns their subscripts -void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor) -{ - ptrdiff_t numel = 0; - long *subscript_data; - long i = 0; - long dim; - long div = 1; -#ifdef TH_REAL_IS_HALF -#define IS_NONZERO(val) ((val.x & 0x7fff) != 0) -#else -#define IS_NONZERO(val) ((val)!=0) -#endif - - /* First Pass to determine size of subscripts */ - TH_TENSOR_APPLY(real, tensor, - if IS_NONZERO(*tensor_data) { - ++numel; - }); -#ifdef DEBUG - THAssert(numel <= LONG_MAX); -#endif - THLongTensor_resize2d(subscript, numel, tensor->nDimension); - - /* Second pass populates subscripts */ - subscript_data = THLongTensor_data(subscript); - TH_TENSOR_APPLY(real, tensor, - if IS_NONZERO(*tensor_data) { - div = 1; - - for (dim = tensor->nDimension - 1; dim >= 0; dim--) { - *(subscript_data + dim) = (i/div) % tensor->size[dim]; - div *= tensor->size[dim]; - } - - subscript_data += tensor->nDimension; - } - ++i;); -} - -void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index) -{ - ptrdiff_t i, numel; - THLongStorage *newSize; - THTensor *tSlice, *sSlice; - long *index_data; - real *tensor_data, *src_data; - - THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); - THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); - THArgCheck(src->nDimension > 0,2,"Source tensor is empty"); - - numel = THLongTensor_nElement(index); - - newSize = THLongStorage_newWithSize(src->nDimension); - THLongStorage_rawCopy(newSize,src->size); -#ifdef DEBUG - THAssert(numel <= LONG_MAX); -#endif - newSize->data[dim] = numel; - THTensor_(resize)(tensor,newSize,NULL); - THLongStorage_free(newSize); - - index = THLongTensor_newContiguous(index); - index_data = THLongTensor_data(index); - - if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor)) - { - tensor_data = THTensor_(data)(tensor); - src_data = THTensor_(data)(src); - ptrdiff_t rowsize = THTensor_(nElement)(src) / src->size[0]; - - // check that the indices are within range - long max = src->size[0] - 1 + TH_INDEX_BASE; - for (i=0; i max) { - THLongTensor_free(index); - THError("index out of range"); - } - } - - if (src->nDimension == 1) { - #pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; inDimension == 1) - { - for (i=0; inDimension == 1, 3, "Index is supposed to be a vector"); - THArgCheck(dim < src->nDimension, 4, "Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); - THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)"); - - index = THLongTensor_newContiguous(index); - index_data = THLongTensor_data(index); - - if (tensor->nDimension > 1 ) - { - tSlice = THTensor_(new)(); - sSlice = THTensor_(new)(); - - for (i=0; inDimension == 1, 3, "Index is supposed to be a vector"); - THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of 
bounds of tensor", dim + TH_INDEX_BASE); - THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)"); - - index = THLongTensor_newContiguous(index); - index_data = THLongTensor_data(index); - - if (tensor->nDimension > 1) - { - tSlice = THTensor_(new)(); - sSlice = THTensor_(new)(); - - for (i=0; inDimension == 1, 3, "Index is supposed to be a vector"); - THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); - - index = THLongTensor_newContiguous(index); - index_data = THLongTensor_data(index); - - for (i=0; inDimension > 1) - { - tSlice = THTensor_(new)(); - THTensor_(select)(tSlice, tensor,dim,index_data[i] - TH_INDEX_BASE); - THTensor_(fill)(tSlice, val); - THTensor_(free)(tSlice); - } - else - { - THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, val); - } - } - THLongTensor_free(index); -} - -void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index) -{ - long elems_per_row, i, idx; - - THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2, - "Input tensor must have same dimensions as output tensor"); - THArgCheck(dim < THTensor_(nDimension)(tensor), 3, "Index dimension is out of bounds"); - THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4, - "Index tensor must have same dimensions as input tensor"); - - elems_per_row = THLongTensor_size(index, dim); - - TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim, - for (i = 0; i < elems_per_row; ++i) - { - idx = *(index_data + i*index_stride); - if (idx < TH_INDEX_BASE || idx >= src_size + TH_INDEX_BASE) - { - THFree(TH_TENSOR_DIM_APPLY_counter); - THError("Invalid index in gather"); - } - *(tensor_data + i*tensor_stride) = src_data[(idx - TH_INDEX_BASE) * src_stride]; - }) -} - -void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) -{ - long elems_per_row, i, idx; - - THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); - THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, - "Index tensor must have same dimensions as output tensor"); - THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4, - "Input tensor must have same dimensions as output tensor"); - - elems_per_row = THLongTensor_size(index, dim); - - TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim, - for (i = 0; i < elems_per_row; ++i) - { - idx = *(index_data + i*index_stride); - if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE) - { - THFree(TH_TENSOR_DIM_APPLY_counter); - THError("Invalid index in scatter"); - } - tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = *(src_data + i*src_stride); - }) -} - -void THTensor_(scatterAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) -{ - long elems_per_row, i, idx; - - THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); - THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, - "Index tensor must have same dimensions as output tensor"); - THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4, - "Input tensor must have same dimensions as output tensor"); - - elems_per_row = THLongTensor_size(index, dim); - - TH_TENSOR_DIM_APPLY3(real, tensor, real, src, long, index, dim, - for (i = 0; i < elems_per_row; ++i) - { - idx = *(index_data + i*index_stride); - if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE) - { - 
THFree(TH_TENSOR_DIM_APPLY_counter); - THError("Invalid index in scatterAdd"); - } - tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] += *(src_data + i*src_stride); - }) -} - -void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val) -{ - long elems_per_row, i, idx; - - THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); - THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, - "Index tensor must have same dimensions as output tensor"); - - elems_per_row = THLongTensor_size(index, dim); - - TH_TENSOR_DIM_APPLY2(real, tensor, long, index, dim, - for (i = 0; i < elems_per_row; ++i) - { - idx = *(index_data + i*index_stride); - if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE) - { - THFree(TH_TENSOR_DIM_APPLY_counter); - THError("Invalid index in scatter"); - } - tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = val; - }) -} - -accreal THTensor_(dot)(THTensor *tensor, THTensor *src) -{ - accreal sum = 0; - /* we use a trick here. careful with that. */ - TH_TENSOR_APPLY2(real, tensor, real, src, - long sz = (tensor_size-tensor_i < src_size-src_i ? tensor_size-tensor_i : src_size-src_i); - sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride); - tensor_i += sz; - src_i += sz; - tensor_data += sz*tensor_stride; - src_data += sz*src_stride; - break;); - return sum; -} - - -#undef th_isnan -#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) -#define th_isnan(val) \ -(isnan(val)) -#else -#define th_isnan(val) (0) -#endif - -#undef th_isnan_break -#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) -#define th_isnan_break(val) \ -if (isnan(val)) break; -#else -#define th_isnan_break(val) -#endif - -real THTensor_(minall)(THTensor *tensor) -{ - real theMin; - real value; - - THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension"); - theMin = THTensor_(data)(tensor)[0]; - TH_TENSOR_APPLY(real, tensor, - value = *tensor_data; - /* This is not the same as value= theMin)) - { - theMin = value; - th_isnan_break(value) - }); - return theMin; -} - -real THTensor_(maxall)(THTensor *tensor) -{ - real theMax; - real value; - - THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension"); - theMax = THTensor_(data)(tensor)[0]; - TH_TENSOR_APPLY(real, tensor, - value = *tensor_data; - /* This is not the same as value>theMax in the case of NaNs */ - if(!(value <= theMax)) - { - theMax = value; - th_isnan_break(value) - }); - return theMax; -} - -static void THTensor_(quickselectnoidx)(real *arr, long k, long elements, long stride); - -real THTensor_(medianall)(THTensor *tensor) -{ - THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension"); - - real theMedian; - ptrdiff_t numel; - long k; - THTensor *temp_; - real *temp__data; - - numel = THTensor_(nElement)(tensor); - k = (numel-1) >> 1; - - temp_ = THTensor_(newClone)(tensor); - temp__data = THTensor_(data)(temp_); - - THTensor_(quickselectnoidx)(temp__data, k, numel, 1); - - theMedian = temp__data[k]; - - THTensor_(free)(temp_); - - return theMedian; -} - -accreal THTensor_(sumall)(THTensor *tensor) -{ - accreal sum = 0; - TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;); - return sum; -} - -accreal THTensor_(prodall)(THTensor *tensor) -{ - accreal prod = 1; - TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;); - return prod; -} - -void THTensor_(add)(THTensor *r_, THTensor *t, real value) -{ - THTensor_(resizeAs)(r_, t); - if (THTensor_(isContiguous)(r_) && 
THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { - TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(adds)(r__data, t_data, value, r__len);); - } else { - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;); - } -} - -void THTensor_(sub)(THTensor *r_, THTensor *t, real value) -{ - THTensor_(add)(r_, t, -value); -} - -void THTensor_(mul)(THTensor *r_, THTensor *t, real value) -{ - THTensor_(resizeAs)(r_, t); - if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { - TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(muls)(r__data, t_data, value, r__len);); - } else { - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;); - } -} - -void THTensor_(div)(THTensor *r_, THTensor *t, real value) -{ - THTensor_(resizeAs)(r_, t); - if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { - TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(divs)(r__data, t_data, value, r__len);); - } else { - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;); - } -} - -void THTensor_(lshift)(THTensor *r_, THTensor *t, real value) -{ -#if defined(TH_REAL_IS_FLOAT) - return THTensor_(mul)(r_, t, powf(2, value)); -#elif defined(TH_REAL_IS_DOUBLE) - return THTensor_(mul)(r_, t, pow(2, value)); -#elif defined(TH_REAL_IS_HALF) - return THError("lshift is not supported for torch.HalfTensor"); -#else - THTensor_(resizeAs)(r_, t); - if (THTensor_(isContiguous)(r_) && - THTensor_(isContiguous)(t) && - THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { - real *tp = THTensor_(data)(t); - real *rp = THTensor_(data)(r_); - long sz = THTensor_(nElement)(t); - long i; - #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) - for (i=0; i> value; -#else - rp[i] = ((unsigned real) tp[i]) >> value; -#endif - } - } else { -#if defined(TH_REAL_IS_BYTE) - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) >> value);); -#else - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((unsigned real) *t_data) >> value);); -#endif - } -#endif -} - -void THTensor_(fmod)(THTensor *r_, THTensor *t, real value) -{ - THTensor_(resizeAs)(r_, t); - if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { - - real *tp = THTensor_(data)(t); - real *rp = THTensor_(data)(r_); - ptrdiff_t sz = THTensor_(nElement)(t); - ptrdiff_t i; - #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i max_value ? max_value : tp[i]); - } else { - TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? 
max_value : *t_data);); - } -} - -void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src) -{ - THTensor_(resizeAs)(r_, t); - if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { - if(r_ == t) { - THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1); - } else { - TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cadd)(r__data, t_data, src_data, value, r__len);); - } - } else { - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;); - } -} - -void THTensor_(csub)(THTensor *r_, THTensor *t, real value,THTensor *src) -{ - THTensor_(cadd)(r_, t, -value, src); -} - -void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src) -{ - THTensor_(resizeAs)(r_, t); - if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { - TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cmul)(r__data, t_data, src_data, r__len);); - } else { - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;); - } -} - -void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src) -{ - THTensor_(resizeAs)(r_, t); - if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { - real *tp = THTensor_(data)(t); - real *sp = THTensor_(data)(src); - real *rp = THTensor_(data)(r_); - ptrdiff_t sz = THTensor_(nElement)(t); - ptrdiff_t i; - #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i> sp[i]; -#else - rp[i] = ((unsigned real) tp[i]) >> sp[i]; -#endif - } - } else { -#if defined(TH_REAL_IS_FLOAT) - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data);); -#elif defined(TH_REAL_IS_DOUBLE) - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data);); -#elif defined(TH_REAL_IS_BYTE) - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;); -#else - TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((unsigned real)*t_data) >> *src_data;); -#endif - } -} - -void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src) -{ - THTensor_(resizeAs)(r_, t); - if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { - real *tp = THTensor_(data)(t); - real *sp = THTensor_(data)(src); - real *rp = THTensor_(data)(r_); - ptrdiff_t sz = THTensor_(nElement)(t); - ptrdiff_t i; - #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; i TH_OMP_OVERHEAD_THRESHOLD) private(i) - for (i=0; inDimension != 2) || (vec->nDimension != 1) ) - THError("matrix and vector expected, got %dD, %dD", - mat->nDimension, vec->nDimension); - - if( mat->size[1] != vec->size[0] ) { - THDescBuff bm = THTensor_(sizeDesc)(mat); - THDescBuff bv = THTensor_(sizeDesc)(vec); - THError("size mismatch, %s, %s", bm.str, bv.str); - } - - if(t->nDimension != 1) - THError("vector expected, got 
t: %dD", t->nDimension); - - if(t->size[0] != mat->size[0]) { - THDescBuff bt = THTensor_(sizeDesc)(t); - THDescBuff bm = THTensor_(sizeDesc)(mat); - THError("size mismatch, t: %s, mat: %s", bt.str, bm.str); - } - - if(r_ != t) - { - THTensor_(resizeAs)(r_, t); - THTensor_(copy)(r_, t); - } - - if(mat->stride[0] == 1) - { - THBlas_(gemv)('n', mat->size[0], mat->size[1], - alpha, THTensor_(data)(mat), mat->stride[1], - THTensor_(data)(vec), vec->stride[0], - beta, THTensor_(data)(r_), r_->stride[0]); - } - else if(mat->stride[1] == 1) - { - THBlas_(gemv)('t', mat->size[1], mat->size[0], - alpha, THTensor_(data)(mat), mat->stride[0], - THTensor_(data)(vec), vec->stride[0], - beta, THTensor_(data)(r_), r_->stride[0]); - } - else - { - THTensor *cmat = THTensor_(newContiguous)(mat); - - THBlas_(gemv)('t', mat->size[1], mat->size[0], - alpha, THTensor_(data)(cmat), cmat->stride[0], - THTensor_(data)(vec), vec->stride[0], - beta, THTensor_(data)(r_), r_->stride[0]); - - THTensor_(free)(cmat); - } -} - -void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain) -{ - long N1 = m1->size[0]; - long N2 = m2->size[0]; - long dim; - real *m1_p; - real *m2_p; - real *r_p; - long i; - - THTensor_(resize2d)(r_, N1, N2); - - m1 = THTensor_(newContiguous)(m1); - m2 = THTensor_(newContiguous)(m2); - - THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1); - THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2); - - dim = m1->size[1]; - THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim"); - - m1_p = THTensor_(data)(m1); - m2_p = THTensor_(data)(m2); - r_p = THTensor_(data)(r_); - -#pragma omp parallel for private(i) - for (i=0; inDimension != 2) || (m2->nDimension != 2)) - THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension); - - if(m1->size[1] != m2->size[0]) { - THDescBuff bm1 = THTensor_(sizeDesc)(m1); - THDescBuff bm2 = THTensor_(sizeDesc)(m2); - THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); - } - - if( t->nDimension != 2 ) - THError("matrix expected, got %dD tensor for t", t->nDimension); - - if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) { - THDescBuff bt = THTensor_(sizeDesc)(t); - THDescBuff bm1 = THTensor_(sizeDesc)(m1); - THDescBuff bm2 = THTensor_(sizeDesc)(m2); - THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); - } - - if(t != r_) - { - THTensor_(resizeAs)(r_, t); - THTensor_(copy)(r_, t); - } - - /* r_ */ - if(r_->stride[0] == 1 && - r_->stride[1] != 0) - { - transpose_r = 'n'; - r__ = r_; - } - else if(r_->stride[1] == 1 && - r_->stride[0] != 0) - { - THTensor *swap = m2; - m2 = m1; - m1 = swap; - transpose_r = 't'; - r__ = r_; - } - else - { - transpose_r = 'n'; - - THTensor *transp_r_ = THTensor_(newTranspose)(r_, 0, 1); - r__ = THTensor_(newClone)(transp_r_); - THTensor_(free)(transp_r_); - THTensor_(transpose)(r__, NULL, 0, 1); - } - - /* m1 */ - if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && - m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0) - { - transpose_m1 = 'n'; - m1_ = m1; - } - else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && - m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0) - { - transpose_m1 = 't'; - m1_ = m1; - } - else - { - transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); - m1_ = THTensor_(newContiguous)(m1); - } - - /* m2 */ - if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && - m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0) - { - transpose_m2 = 'n'; - m2_ = m2; - } - else if(m2->stride[(transpose_r == 'n' ? 
1 : 0)] == 1 && - m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0) - { - transpose_m2 = 't'; - m2_ = m2; - } - else - { - transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); - m2_ = THTensor_(newContiguous)(m2); - } - -#pragma omp critical(blasgemm) - /* do the operation */ - THBlas_(gemm)(transpose_m1, - transpose_m2, - r__->size[(transpose_r == 'n' ? 0 : 1)], - r__->size[(transpose_r == 'n' ? 1 : 0)], - m1_->size[(transpose_r == 'n' ? 1 : 0)], - alpha, - THTensor_(data)(m1_), - (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), - THTensor_(data)(m2_), - (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), - beta, - THTensor_(data)(r__), - r__->stride[(transpose_r == 'n' ? 1 : 0)]); - - /* free intermediate variables */ - if(m1_ != m1) - THTensor_(free)(m1_); - - if(m2_ != m2) - THTensor_(free)(m2_); - - if(r__ != r_) - THTensor_(freeCopyTo)(r__, r_); -} - -void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2) -{ - if( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) - THError("vector and vector expected, got %dD, %dD tensors", - vec1->nDimension, vec2->nDimension); - - if(t->nDimension != 2) - THError("expected matrix, got %dD tensor for t", t->nDimension); - - if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) { - THDescBuff bt = THTensor_(sizeDesc)(t); - THDescBuff bv1 = THTensor_(sizeDesc)(vec1); - THDescBuff bv2 = THTensor_(sizeDesc)(vec2); - THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str); - } - - if(r_ != t) - { - THTensor_(resizeAs)(r_, t); - THTensor_(copy)(r_, t); - } - - if(beta == 0) { - THTensor_(zero)(r_); - } - else if(beta != 1) - THTensor_(mul)(r_, r_, beta); - - if(r_->stride[0] == 1) - { - THBlas_(ger)(vec1->size[0], vec2->size[0], - alpha, THTensor_(data)(vec1), vec1->stride[0], - THTensor_(data)(vec2), vec2->stride[0], - THTensor_(data)(r_), r_->stride[1]); - } - else if(r_->stride[1] == 1) - { - THBlas_(ger)(vec2->size[0], vec1->size[0], - alpha, THTensor_(data)(vec2), vec2->stride[0], - THTensor_(data)(vec1), vec1->stride[0], - THTensor_(data)(r_), r_->stride[0]); - } - else - { - THTensor *cr = THTensor_(newClone)(r_); - - THBlas_(ger)(vec2->size[0], vec1->size[0], - alpha, THTensor_(data)(vec2), vec2->stride[0], - THTensor_(data)(vec1), vec1->stride[0], - THTensor_(data)(cr), cr->stride[0]); - - THTensor_(freeCopyTo)(cr, r_); - } -} - -void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2) -{ - long batch; - - THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor"); - THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor"); - THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2, - "equal number of batches expected, got %d, %d", - THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0)); - THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2, - "wrong matrix size, batch1: %dx%d, batch2: %dx%d", - THTensor_(size)(batch1, 1), THTensor_(size)(batch1,2), - THTensor_(size)(batch2, 1), THTensor_(size)(batch2,2)); - - long dim1 = THTensor_(size)(batch1, 1); - long dim2 = THTensor_(size)(batch2, 2); - THArgCheck(THTensor_(size)(t, 0) == dim1, 1, "output tensor of incorrect size"); - THArgCheck(THTensor_(size)(t, 1) == dim2, 1, "output tensor of incorrect size"); - - if (t != result) { - THTensor_(resizeAs)(result, t); - 
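/* addbmm reduces a whole batch of matrix products into a single matrix.
 * In effect (a sketch of the semantics, not the literal implementation):
 *
 *   result = beta * t;
 *   for (b = 0; b < batchSize; b++)
 *     result += alpha * batch1[b] * batch2[b];   // one gemm per batch
 *
 * The loop below realises this by calling addmm once per batch and forcing
 * beta to 1 after the first iteration, so earlier products are kept. */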
THTensor_(copy)(result, t); - } - - THTensor *matrix1 = THTensor_(new)(); - THTensor *matrix2 = THTensor_(new)(); - - for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) { - THTensor_(select)(matrix1, batch1, 0, batch); - THTensor_(select)(matrix2, batch2, 0, batch); - - THTensor_(addmm)(result, beta, result, alpha, matrix1, matrix2); - beta = 1; // accumulate output once - } - - THTensor_(free)(matrix1); - THTensor_(free)(matrix2); -} - -void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2) -{ - long batch; - - THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch1)); - THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD", THTensor_(nDimension)(batch2)); - THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2, - "equal number of batches expected, got %d, %d", - THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0)); - THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2, - "wrong matrix size, batch1: %dx%d, batch2: %dx%d", - THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2), - THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2)); - - long bs = THTensor_(size)(batch1, 0); - long dim1 = THTensor_(size)(batch1, 1); - long dim2 = THTensor_(size)(batch2, 2); - THArgCheck(THTensor_(size)(t, 0) == bs, 1, "output tensor of incorrect size"); - THArgCheck(THTensor_(size)(t, 1) == dim1, 1, "output tensor of incorrect size"); - THArgCheck(THTensor_(size)(t, 2) == dim2, 1, "output tensor of incorrect size"); - - if (t != result) { - THTensor_(resizeAs)(result, t); - THTensor_(copy)(result, t); - } - - THTensor *matrix1 = THTensor_(new)(); - THTensor *matrix2 = THTensor_(new)(); - THTensor *result_matrix = THTensor_(new)(); - - for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) { - THTensor_(select)(matrix1, batch1, 0, batch); - THTensor_(select)(matrix2, batch2, 0, batch); - THTensor_(select)(result_matrix, result, 0, batch); - - THTensor_(addmm)(result_matrix, beta, result_matrix, alpha, matrix1, matrix2); - } - - THTensor_(free)(matrix1); - THTensor_(free)(matrix2); - THTensor_(free)(result_matrix); -} - -ptrdiff_t THTensor_(numel)(THTensor *t) -{ - return THTensor_(nElement)(t); -} - -void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim) -{ - THLongStorage *dim; - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", - dimension + TH_INDEX_BASE); - - dim = THTensor_(newSizeOf)(t); - THLongStorage_set(dim, dimension, 1); - THTensor_(resize)(values_, dim, NULL); - THLongTensor_resize(indices_, dim, NULL); - THLongStorage_free(dim); - - // two implementations optimized for data locality - if (t->stride[dimension] == 1) { - real theMax; - real value; - long theIndex; - long i; - TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension, - theMax = t_data[0]; - theIndex = 0; - - for(i = 0; i < t_size; i++) - { - value = t_data[i*t_stride]; - /* This is not the same as value>theMax in the case of NaNs */ - if(!(value <= theMax)) - { - theIndex = i; - theMax = value; - th_isnan_break(value) - } - } - *indices__data = theIndex; - *values__data = theMax;); - } else { - if (THTensor_(nDimension)(t) > 1) { - THTensor *t0 = THTensor_(newSelect)(t, dimension, 0); - THTensor_(copy)(values_, t0); - THTensor_(free)(t0); - } else { - THTensor_(fill)(values_, THTensor_(get1d)(t, 0)); - } - 
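/* The comparison style used throughout these reductions,
 * `if (!(value <= theMax))`, is deliberately not the same as
 * `if (value > theMax)`: every ordered comparison involving NaN is false,
 * so the negated form also fires when value is NaN. A NaN therefore wins
 * the reduction and th_isnan_break() stops the scan early. A sketch:
 *
 *   value > theMax      -> false for NaN, a NaN would be skipped
 *   !(value <= theMax)  -> true  for NaN, the NaN is selected
 */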
THLongTensor_zero(indices_); - - if(t->size[dimension] == 1) { - return; - } - - THTensor *tempValues_ = THTensor_(newWithTensor)(values_); - // tempValues_.expand_as(t) - tempValues_->size[dimension] = t->size[dimension]; - tempValues_->stride[dimension] = 0; - - THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_); - // tempIndices_.expand_as(t) - tempIndices_->size[dimension] = t->size[dimension]; - tempIndices_->stride[dimension] = 0; - - TH_TENSOR_APPLY3_D(real, t, real, tempValues_, long, tempIndices_, dimension, - if(!(*t_data <= *tempValues__data) && !th_isnan(*tempValues__data)) { - *tempValues__data = *t_data; - *tempIndices__data = *tempIndices__dimOffset; - }); - - THTensor_(free)(tempValues_); - THLongTensor_free(tempIndices_); - } - - if (!keepdim) { - THTensor_(squeeze1d)(values_, values_, dimension); - THLongTensor_squeeze1d(indices_, indices_, dimension); - } -} - -void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim) -{ - THLongStorage *dim; - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", - dimension + TH_INDEX_BASE); - - dim = THTensor_(newSizeOf)(t); - THLongStorage_set(dim, dimension, 1); - THTensor_(resize)(values_, dim, NULL); - THLongTensor_resize(indices_, dim, NULL); - THLongStorage_free(dim); - - // two implementations optimized for data locality - if (t->stride[dimension] == 1) { - real theMax; - real value; - long theIndex; - long i; - TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension, - theMax = t_data[0]; - theIndex = 0; - - for(i = 0; i < t_size; i++) - { - value = t_data[i*t_stride]; - /* This is not the same as value>theMax in the case of NaNs */ - if(!(value >= theMax)) - { - theIndex = i; - theMax = value; - th_isnan_break(value) - } - } - *indices__data = theIndex; - *values__data = theMax;); - } else { - if (THTensor_(nDimension)(t) > 1) { - THTensor *t0 = THTensor_(newSelect)(t, dimension, 0); - THTensor_(copy)(values_, t0); - THTensor_(free)(t0); - } else { - THTensor_(fill)(values_, THTensor_(get1d)(t, 0)); - } - THLongTensor_zero(indices_); - - if(t->size[dimension] == 1) { - return; - } - - THTensor *tempValues_ = THTensor_(newWithTensor)(values_); - // tempValues_.expand_as(t) - tempValues_->size[dimension] = t->size[dimension]; - tempValues_->stride[dimension] = 0; - - THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_); - // tempIndices_.expand_as(t) - tempIndices_->size[dimension] = t->size[dimension]; - tempIndices_->stride[dimension] = 0; - - TH_TENSOR_APPLY3_D(real, t, real, tempValues_, long, tempIndices_, dimension, - if(!(*t_data >= *tempValues__data) && !th_isnan(*tempValues__data)) { - *tempValues__data = *t_data; - *tempIndices__data = *tempIndices__dimOffset; - }); - } - - if (!keepdim) { - THTensor_(squeeze1d)(values_, values_, dimension); - THLongTensor_squeeze1d(indices_, indices_, dimension); - } -} - - -void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim) -{ - THLongStorage *dim; - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", - dimension + TH_INDEX_BASE); - - dim = THTensor_(newSizeOf)(t); - THLongStorage_set(dim, dimension, 1); - THTensor_(resize)(r_, dim, NULL); - THLongStorage_free(dim); - - // two implementations optimized for data locality - if (t->stride[dimension] == 1) { - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, - accreal sum = 0; - long i; - for(i = 0; i < t_size; i++) - sum += 
t_data[i*t_stride]; - *r__data = (real)sum;); - } else { - THTensor_(zero)(r_); - THTensor *temp_ = THTensor_(newWithTensor)(r_); - // r_.expand_as(t) - temp_->size[dimension] = t->size[dimension]; - temp_->stride[dimension] = 0; - - TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data + *t_data;); - THTensor_(free)(temp_); - } - - if (!keepdim) { - THTensor_(squeeze1d)(r_, r_, dimension); - } -} - -void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim) -{ - THLongStorage *dim; - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", - dimension + TH_INDEX_BASE); - - dim = THTensor_(newSizeOf)(t); - THLongStorage_set(dim, dimension, 1); - THTensor_(resize)(r_, dim, NULL); - THLongStorage_free(dim); - - // two implementations optimized for data locality - if (t->stride[dimension] == 1) { - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, - accreal prod = 1; - long i; - for(i = 0; i < t_size; i++) - prod *= t_data[i*t_stride]; - *r__data = (real)prod;); - } else { - THTensor_(fill)(r_, 1); - THTensor *temp_ = THTensor_(newWithTensor)(r_); - // r_.expand_as(t) - temp_->size[dimension] = t->size[dimension]; - temp_->stride[dimension] = 0; - - TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data * *t_data;); - THTensor_(free)(temp_); - } - - if (!keepdim) { - THTensor_(squeeze1d)(r_, r_, dimension); - } -} - -void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension) -{ - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", - dimension + TH_INDEX_BASE); - - THTensor_(resizeAs)(r_, t); - - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, - accreal cumsum = 0; - long i; - for(i = 0; i < t_size; i++) - { - cumsum += t_data[i*t_stride]; - r__data[i*r__stride] = (real)cumsum; - }); -} - -void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension) -{ - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", - dimension + TH_INDEX_BASE); - - THTensor_(resizeAs)(r_, t); - - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, - accreal cumprod = 1; - long i; - for(i = 0; i < t_size; i++) - { - cumprod *= t_data[i*t_stride]; - r__data[i*r__stride] = (real)cumprod; - }); -} - - -void THTensor_(sign)(THTensor *r_, THTensor *t) -{ - THTensor_(resizeAs)(r_, t); - -#if defined (TH_REAL_IS_BYTE) - TH_TENSOR_APPLY2(real, r_, real, t, - if (*t_data > 0) *r__data = 1; - else *r__data = 0;); -#else - TH_TENSOR_APPLY2(real, r_, real, t, - if (*t_data > 0) *r__data = 1; - else if (*t_data < 0) *r__data = -1; - else *r__data = 0;); -#endif -} - - -accreal THTensor_(trace)(THTensor *t) -{ - real *t_data = THTensor_(data)(t); - accreal sum = 0; - long i = 0; - long t_stride_0, t_stride_1, t_diag_size; - - THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); - - t_stride_0 = THTensor_(stride)(t, 0); - t_stride_1 = THTensor_(stride)(t, 1); - t_diag_size = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)); - while(i < t_diag_size) - { - sum += t_data[i*(t_stride_0+t_stride_1)]; - i++; - } - - return sum; -} - -void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension) -{ - int i; - - if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b)) - THError("inconsistent tensor dimension %dD, %dD", - THTensor_(nDimension)(a), THTensor_(nDimension)(b)); - - for(i = 0; i < THTensor_(nDimension)(a); i++) - { - if(THTensor_(size)(a, i) != THTensor_(size)(b, i)) { - THDescBuff ba = THTensor_(sizeDesc)(a); - 
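/* A note on the shape contract shared by the dimension-wise reductions
 * above (sum, prod, max, min): the reduced dimension is first kept with
 * size 1, then removed by squeeze1d() unless keepdim is set. A sketch for
 * a hypothetical 2x3 input reduced over dimension 1:
 *
 *   sum(t, 1, keepdim=1) -> size 2x1
 *   sum(t, 1, keepdim=0) -> size 2    (the 1-sized dim is squeezed away)
 *
 * cumsum/cumprod, by contrast, are scans: they keep the full input shape
 * and store every running total, so no squeeze is involved. */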
THDescBuff bb = THTensor_(sizeDesc)(b); - THError("inconsistent tensor sizes %s, %s", ba.str, bb.str); - } - } - - if(dimension < 0) - { - for(i = 0; i < THTensor_(nDimension)(a); i++) - { - if(THTensor_(size)(a, i) == 3) - { - dimension = i; - break; - } - } - if(dimension < 0) { - THDescBuff ba = THTensor_(sizeDesc)(a); - THError("no dimension of size 3 in a: %s", ba.str); - } - } - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3, "dimension %d out of range", - dimension + TH_INDEX_BASE); - THArgCheck(THTensor_(size)(a, dimension) == 3, 3, "dimension %d does not have size 3", - dimension + TH_INDEX_BASE); - - THTensor_(resizeAs)(r_, a); - - TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension, - r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride]; - r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride]; - r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];); -} - -void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) { - THTensor_(resizeAs)(r, t); - TH_TENSOR_APPLY3(real, r, real, t, real, src, - *r_data = *t_data > *src_data ? *t_data : *src_data;); -} - -void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) { - THTensor_(resizeAs)(r, t); - TH_TENSOR_APPLY3(real, r, real, t, real, src, - *r_data = *t_data < *src_data ? *t_data : *src_data;); -} - -void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) { - THTensor_(resizeAs)(r, t); - TH_TENSOR_APPLY2(real, r, real, t, - *r_data = *t_data > value ? *t_data : value;); -} - -void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) { - THTensor_(resizeAs)(r, t); - TH_TENSOR_APPLY2(real, r, real, t, - *r_data = *t_data < value ? *t_data : value;); -} - -void THTensor_(zeros)(THTensor *r_, THLongStorage *size) -{ - THTensor_(resize)(r_, size, NULL); - THTensor_(zero)(r_); -} - -void THTensor_(ones)(THTensor *r_, THLongStorage *size) -{ - THTensor_(resize)(r_, size, NULL); - THTensor_(fill)(r_, 1); -} - -void THTensor_(diag)(THTensor *r_, THTensor *t, int k) -{ - THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected"); - - if(THTensor_(nDimension)(t) == 1) - { - real *t_data = THTensor_(data)(t); - long t_stride_0 = THTensor_(stride)(t, 0); - long t_size = THTensor_(size)(t, 0); - long sz = t_size + (k >= 0 ? k : -k); - real *r__data; - long r__stride_0; - long r__stride_1; - long i; - - THTensor_(resize2d)(r_, sz, sz); - THTensor_(zero)(r_); - r__data = THTensor_(data)(r_); - r__stride_0 = THTensor_(stride)(r_, 0); - r__stride_1 = THTensor_(stride)(r_, 1); - r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0); - - for(i = 0; i < t_size; i++) - r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0]; - } - else - { - real *t_data = THTensor_(data)(t); - long t_stride_0 = THTensor_(stride)(t, 0); - long t_stride_1 = THTensor_(stride)(t, 1); - long sz; - real *r__data; - long r__stride_0; - long i; - - if(k >= 0) - sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k); - else - sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1)); - THTensor_(resize1d)(r_, sz); - r__data = THTensor_(data)(r_); - r__stride_0 = THTensor_(stride)(r_, 0); - - t_data += (k >= 0 ? 
k*t_stride_1 : -k*t_stride_0); - for(i = 0; i < sz; i++) - r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)]; - } -} - -void THTensor_(eye)(THTensor *r_, long n, long m) -{ - real *r__data; - long i, sz; - - THArgCheck(n > 0, 1, "invalid argument"); - - if(m <= 0) - m = n; - - THTensor_(resize2d)(r_, n, m); - THTensor_(zero)(r_); - - i = 0; - r__data = THTensor_(data)(r_); - sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1)); - for(i = 0; i < sz; i++) - r__data[i*(r_->stride[0]+r_->stride[1])] = 1; -} - - -void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step) -{ - ptrdiff_t size; - real i = 0; - - THArgCheck(step > 0 || step < 0, 3, "step must be a non-null number"); - THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) - , 2, "upper bound and larger bound incoherent with step sign"); - - size = (ptrdiff_t) (((xmax - xmin) / step) + 1); - - if (THTensor_(nElement)(r_) != size) { - THTensor_(resize1d)(r_, size); - } - - TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;); -} - -void THTensor_(arange)(THTensor *r_, accreal xmin, accreal xmax, accreal step) { -#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) - int m = fmod(xmax - xmin,step) == 0; -#else - int m = (xmax - xmin) % step == 0; -#endif - if (m) - xmax -= step; - THTensor_(range)(r_,xmin,xmax,step); -} - -void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, long n) -{ - real *r__data; - long r__stride_0; - long i; - - THArgCheck(n > 0, 1, "must be strictly positive"); - - THTensor_(resize1d)(r_, n); - r__data = THTensor_(data)(r_); - r__stride_0 = THTensor_(stride)(r_,0); - - for(i = 0; i < n; i++) - r__data[i*r__stride_0] = (real)(i); - - for(i = 0; i < n-1; i++) - { - long z = THRandom_random(_generator) % (n-i); - real sav = r__data[i*r__stride_0]; - r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0]; - r__data[(z+i)*r__stride_0] = sav; - } -} - -void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size) -{ - THTensor_(resize)(r_, size, NULL); - THTensor_(copy)(r_, t); -} - -/* I cut and pasted (slightly adapted) the quicksort code from - Sedgewick's 1978 "Implementing Quicksort Programs" article - http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf - - It is the state of the art existing implementation. The macros - are here to make as close a match as possible to the pseudocode of - Program 2 p.851 - - Note that other partition schemes exist, and are typically presented - in textbook, but those are less efficient. See e.g. 
- http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto - - Julien, November 12th 2013 -*/ -#define MAX_LEVELS 300 -#define M_SMALL 10 /* Limit for small subfiles */ - -#define ARR(III) arr[(III)*stride] -#define IDX(III) idx[(III)*stride] - -#define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap -#define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap - -#define ARR_SWAP(III, JJJ) \ - REAL_SWAP(ARR(III), ARR(JJJ)); - -#define BOTH_SWAP(III, JJJ) \ - REAL_SWAP(ARR(III), ARR(JJJ)); \ - LONG_SWAP(IDX(III), IDX(JJJ)) - -static void THTensor_(quicksortascend)(real *arr, long *idx, long elements, long stride) -{ - long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left; - real rswap, piv; - unsigned char done = 0; - - /* beg[0]=0; end[0]=elements; */ - stack = 0; - L = 0; R = elements-1; - done = elements-1 <= M_SMALL; - - while(!done) { - /* Use median of three for pivot choice */ - P=(L+R)>>1; - BOTH_SWAP(P, L+1); - if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); } - if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); } - if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); } - - i = L+1; j = R; piv = ARR(L); pid = IDX(L); - - do { - do { i = i+1; } while(ARR(i) < piv); - do { j = j-1; } while(ARR(j) > piv); - if (j < i) - break; - BOTH_SWAP(i, j); - } while(1); - BOTH_SWAP(L, j); - /* Left subfile is (L, j-1) */ - /* Right subfile is (i, R) */ - sz_left = j-L; - sz_right = R-i+1; - if (sz_left <= M_SMALL && sz_right <= M_SMALL) { - /* both subfiles are small */ - /* if stack empty */ - if (stack == 0) { - done = 1; - } else { - stack--; - L = beg[stack]; - R = end[stack]; - } - } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) { - /* exactly one of the subfiles is small */ - /* (L,R) = large subfile */ - if (sz_left > sz_right) { - /* Implicit: L = L; */ - R = j-1; - } else { - L = i; - /* Implicit: R = R; */ - } - } else { - /* none of the subfiles is small */ - /* push large subfile */ - /* (L,R) = small subfile */ - if (sz_left > sz_right) { - beg[stack] = L; - end[stack] = j-1; - stack++; - L = i; - /* Implicit: R = R */ - } else { - beg[stack] = i; - end[stack] = R; - stack++; - /* Implicit: L = L; */ - R = j-1; - } - } - } /* while not done */ - /* Now insertion sort on the concatenation of subfiles */ - for(i=elements-2; i>=0; i--) { - if (ARR(i) > ARR(i+1)) { - piv = ARR(i); - pid = IDX(i); - j = i+1; - do { - ARR(j-1) = ARR(j); - IDX(j-1) = IDX(j); - j = j+1; - } while(j < elements && ARR(j) < piv); - ARR(j-1) = piv; - IDX(j-1) = pid; - } - } -} - -static void THTensor_(quicksortdescend)(real *arr, long *idx, long elements, long stride) -{ - long beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left; - real rswap, piv; - unsigned char done = 0; - - /* beg[0]=0; end[0]=elements; */ - stack = 0; - L = 0; R = elements-1; - done = elements-1 <= M_SMALL; - - while(!done) { - /* Use median of three for pivot choice */ - P=(L+R)>>1; - BOTH_SWAP(P, L+1); - if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); } - if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); } - if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); } - - i = L+1; j = R; piv = ARR(L); pid = IDX(L); - - do { - do { i = i+1; } while(ARR(i) > piv); - do { j = j-1; } while(ARR(j) < piv); - if (j < i) - break; - BOTH_SWAP(i, j); - } while(1); - BOTH_SWAP(L, j); - /* Left subfile is (L, j-1) */ - /* Right subfile is (i, R) */ - sz_left = j-L; - sz_right = R-i+1; - if (sz_left <= M_SMALL && sz_right <= M_SMALL) { - /* both subfiles are small */ - /* if stack 
empty */ - if (stack == 0) { - done = 1; - } else { - stack--; - L = beg[stack]; - R = end[stack]; - } - } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) { - /* exactly one of the subfiles is small */ - /* (L,R) = large subfile */ - if (sz_left > sz_right) { - /* Implicit: L = L; */ - R = j-1; - } else { - L = i; - /* Implicit: R = R; */ - } - } else { - /* none of the subfiles is small */ - /* push large subfile */ - /* (L,R) = small subfile */ - if (sz_left > sz_right) { - beg[stack] = L; - end[stack] = j-1; - stack++; - L = i; - /* Implicit: R = R */ - } else { - beg[stack] = i; - end[stack] = R; - stack++; - /* Implicit: L = L; */ - R = j-1; - } - } - } /* while not done */ - /* Now insertion sort on the concatenation of subfiles */ - for(i=elements-2; i>=0; i--) { - if (ARR(i) < ARR(i+1)) { - piv = ARR(i); - pid = IDX(i); - j = i+1; - do { - ARR(j-1) = ARR(j); - IDX(j-1) = IDX(j); - j = j+1; - } while(j < elements && ARR(j) > piv); - ARR(j-1) = piv; - IDX(j-1) = pid; - } - } -} - -#undef MAX_LEVELS -#undef M_SMALL - -void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder) -{ - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d", - dimension + TH_INDEX_BASE); - - THTensor_(resizeAs)(rt_, t); - THTensor_(copy)(rt_, t); - - { - THLongStorage *size = THTensor_(newSizeOf)(t); - THLongTensor_resize(ri_, size, NULL); - THLongStorage_free(size); - } - - if(descendingOrder) - { - TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension, - long i; - for(i = 0; i < ri__size; i++) - ri__data[i*ri__stride] = i; - THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);) - } - else - { - TH_TENSOR_DIM_APPLY2(real, rt_, long, ri_, dimension, - long i; - for(i = 0; i < ri__size; i++) - ri__data[i*ri__stride] = i; - THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);) - } -} - -/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's -public domain implementation at http://ndevilla.free.fr/median/median/ -Adapted similarly to the above Quicksort algorithm. -This version does not produce indices along with values. */ -static void THTensor_(quickselectnoidx)(real *arr, long k, long elements, long stride) -{ - long P, L, R, i, j, swap; - real rswap, piv; - L = 0; - R = elements-1; - - do { - if (R <= L) /* One element only */ - return; - - if (R == L+1) { /* Two elements only */ - if (ARR(L) > ARR(R)) { - ARR_SWAP(L, R); - } - return; - } - - /* Use median of three for pivot choice */ - P=(L+R)>>1; - ARR_SWAP(P, L+1); - if (ARR(L+1) > ARR(R)) { ARR_SWAP(L+1, R); } - if (ARR(L) > ARR(R)) { ARR_SWAP(L, R); } - if (ARR(L+1) > ARR(L)) { ARR_SWAP(L+1, L); } - - i = L+1; - j = R; - piv = ARR(L); - do { - do i++; while(ARR(i) < piv); - do j--; while(ARR(j) > piv); - if (j < i) - break; - ARR_SWAP(i, j); - } while(1); - ARR_SWAP(L, j); - - /* Re-set active partition */ - if (j <= k) L=i; - if (j >= k) R=j-1; - } while(1); -} - -/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's -public domain implementation at http://ndevilla.free.fr/median/median/ -Adapted similarly to the above Quicksort algorithm. 
*/ -static void THTensor_(quickselect)(real *arr, long *idx, long k, long elements, long stride) -{ - long P, L, R, i, j, swap, pid; - real rswap, piv; - L = 0; - R = elements-1; - - do { - if (R <= L) /* One element only */ - return; - - if (R == L+1) { /* Two elements only */ - if (ARR(L) > ARR(R)) { - BOTH_SWAP(L, R); - } - return; - } - - /* Use median of three for pivot choice */ - P=(L+R)>>1; - BOTH_SWAP(P, L+1); - if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); } - if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); } - if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); } - - i = L+1; - j = R; - piv = ARR(L); - pid = IDX(L); - do { - do i++; while(ARR(i) < piv); - do j--; while(ARR(j) > piv); - if (j < i) - break; - BOTH_SWAP(i, j); - } while(1); - BOTH_SWAP(L, j); - - /* Re-set active partition */ - if (j <= k) L=i; - if (j >= k) R=j-1; - } while(1); -} - -#undef ARR -#undef IDX -#undef LONG_SWAP -#undef REAL_SWAP -#undef BOTH_SWAP - -void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim) -{ - THLongStorage *dim; - THTensor *temp_; - THLongTensor *tempi_; - real *temp__data; - long *tempi__data; - long t_size_dim; - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); - - dim = THTensor_(newSizeOf)(t); - THLongStorage_set(dim, dimension, 1); - THTensor_(resize)(values_, dim, NULL); - THLongTensor_resize(indices_, dim, NULL); - THLongStorage_free(dim); - - t_size_dim = THTensor_(size)(t, dimension); - - temp_ = THTensor_(new)(); - THTensor_(resize1d)(temp_, t_size_dim); - temp__data = THTensor_(data)(temp_); - - tempi_ = THLongTensor_new(); - THLongTensor_resize1d(tempi_, t_size_dim); - tempi__data = THLongTensor_data(tempi_); - - TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, indices_, dimension, - long i; - real mode = 0; - long modei = 0; - long temp_freq = 0; - long max_freq = 0; - for(i = 0; i < t_size_dim; i++) - temp__data[i] = t_data[i*t_stride]; - for(i = 0; i < t_size_dim; i++) - tempi__data[i] = i; - THTensor_(quicksortascend)(temp__data, tempi__data, t_size_dim, 1); - - for(i = 0; i < t_size_dim; i++) - { - temp_freq++; - if ((i == t_size_dim - 1) || (temp__data[i] != temp__data[i+1])) - { - if (temp_freq > max_freq) - { - mode = temp__data[i]; - modei = tempi__data[i]; - max_freq = temp_freq; - } - temp_freq = 0; - } - } - *values__data = mode; - *indices__data = modei;); - - THTensor_(free)(temp_); - THLongTensor_free(tempi_); - if (!keepdim) { - THTensor_(squeeze1d)(values_, values_, dimension); - THLongTensor_squeeze1d(indices_, indices_, dimension); - } -} - -void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, long k, int dimension, int keepdim) -{ - THLongStorage *dim; - THTensor *temp_; - THLongTensor *tempi_; - real *temp__data; - long *tempi__data; - long t_size_dim; - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); - THArgCheck(k > 0 && k <= t->size[dimension], 2, "selected index out of range"); - - dim = THTensor_(newSizeOf)(t); - THLongStorage_set(dim, dimension, 1); - THTensor_(resize)(values_, dim, NULL); - THLongTensor_resize(indices_, dim, NULL); - THLongStorage_free(dim); - - t_size_dim = THTensor_(size)(t, dimension); - - temp_ = THTensor_(new)(); - THTensor_(resize1d)(temp_, t_size_dim); - temp__data = THTensor_(data)(temp_); - - tempi_ = THLongTensor_new(); - THLongTensor_resize1d(tempi_, t_size_dim); - tempi__data = THLongTensor_data(tempi_); - - TH_TENSOR_DIM_APPLY3(real, t, real, values_, long, 
indices_, dimension, - long i; - for(i = 0; i < t_size_dim; i++) - temp__data[i] = t_data[i*t_stride]; - for(i = 0; i < t_size_dim; i++) - tempi__data[i] = i; - THTensor_(quickselect)(temp__data, tempi__data, k - 1, t_size_dim, 1); - *values__data = temp__data[k-1]; - *indices__data = tempi__data[k-1];); - - THTensor_(free)(temp_); - THLongTensor_free(tempi_); - if (!keepdim) { - THTensor_(squeeze1d)(values_, values_, dimension); - THLongTensor_squeeze1d(indices_, indices_, dimension); - } -} - -void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim) -{ - long t_size_dim, k; - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); - - t_size_dim = THTensor_(size)(t, dimension); - k = (t_size_dim-1) >> 1; /* take middle or one-before-middle element */ - - THTensor_(kthvalue)(values_, indices_, t, k+1, dimension, keepdim); -} - -void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, long k, int dim, int dir, int sorted) -{ - int numDims = THTensor_(nDimension)(t); - THArgCheck(dim >= 0 && dim < numDims, 3, "dim not in range"); - - long sliceSize = THTensor_(size)(t, dim); - THArgCheck(k > 0 && k <= sliceSize, 2, "k not in range for dimension"); - - THTensor *tmpResults = THTensor_(new)(); - THTensor_(resize1d)(tmpResults, sliceSize); - real *tmp__data = THTensor_(data)(tmpResults); - - THLongTensor *tmpIndices = THLongTensor_new(); - THLongTensor_resize1d(tmpIndices, sliceSize); - long *tmpi__data = THLongTensor_data(tmpIndices); - - THLongStorage *topKSize = THTensor_(newSizeOf)(t); - THLongStorage_set(topKSize, dim, k); - THTensor_(resize)(rt_, topKSize, NULL); - THLongTensor_resize(ri_, topKSize, NULL); - THLongStorage_free(topKSize); - - if (dir) { - /* k largest elements, descending order (optional: see sorted) */ - long K = sliceSize - k; - TH_TENSOR_DIM_APPLY3(real, t, real, rt_, long, ri_, dim, - long i; - for(i = 0; i < sliceSize; i++) - { - tmp__data[i] = t_data[i*t_stride]; - tmpi__data[i] = i; - } - if (K > 0) - THTensor_(quickselect)(tmp__data, tmpi__data, K - 1, sliceSize, 1); - if (sorted) - THTensor_(quicksortdescend)(tmp__data + K, tmpi__data + K, k, 1); - for(i = 0; i < k; i++) - { - rt__data[i*rt__stride] = tmp__data[i + K]; - ri__data[i*ri__stride] = tmpi__data[i + K]; - }) - } - else { - /* k smallest elements, ascending order (optional: see sorted) */ - TH_TENSOR_DIM_APPLY3(real, t, real, rt_, long, ri_, dim, - long i; - for(i = 0; i < sliceSize; i++) - { - tmp__data[i] = t_data[i*t_stride]; - tmpi__data[i] = i; - } - THTensor_(quickselect)(tmp__data, tmpi__data, k - 1, sliceSize, 1); - if (sorted) - THTensor_(quicksortascend)(tmp__data, tmpi__data, k - 1, 1); - for(i = 0; i < k; i++) - { - rt__data[i*rt__stride] = tmp__data[i]; - ri__data[i*ri__stride] = tmpi__data[i]; - }) - } - - THTensor_(free)(tmpResults); - THLongTensor_free(tmpIndices); -} - -void THTensor_(tril)(THTensor *r_, THTensor *t, long k) -{ - long t_size_0, t_size_1; - long t_stride_0, t_stride_1; - long r__stride_0, r__stride_1; - real *t_data, *r__data; - long r, c; - - THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); - - THTensor_(resizeAs)(r_, t); - - t_size_0 = THTensor_(size)(t, 0); - t_size_1 = THTensor_(size)(t, 1); - t_stride_0 = THTensor_(stride)(t, 0); - t_stride_1 = THTensor_(stride)(t, 1); - r__stride_0 = THTensor_(stride)(r_, 0); - r__stride_1 = THTensor_(stride)(r_, 1); - r__data = THTensor_(data)(r_); - t_data = THTensor_(data)(t); - - for(r = 0; r < t_size_0; 
r++) - { - long sz = THMin(r+k+1, t_size_1); - for(c = THMax(0, r+k+1); c < t_size_1; c++) - r__data[r*r__stride_0+c*r__stride_1] = 0; - for(c = 0; c < sz; c++) - r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1]; - } -} - -void THTensor_(triu)(THTensor *r_, THTensor *t, long k) -{ - long t_size_0, t_size_1; - long t_stride_0, t_stride_1; - long r__stride_0, r__stride_1; - real *t_data, *r__data; - long r, c; - - THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); - - THTensor_(resizeAs)(r_, t); - - t_size_0 = THTensor_(size)(t, 0); - t_size_1 = THTensor_(size)(t, 1); - t_stride_0 = THTensor_(stride)(t, 0); - t_stride_1 = THTensor_(stride)(t, 1); - r__stride_0 = THTensor_(stride)(r_, 0); - r__stride_1 = THTensor_(stride)(r_, 1); - r__data = THTensor_(data)(r_); - t_data = THTensor_(data)(t); - - for(r = 0; r < t_size_0; r++) - { - long sz = THMin(r+k, t_size_1); - for(c = THMax(0, r+k); c < t_size_1; c++) - r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1]; - for(c = 0; c < sz; c++) - r__data[r*r__stride_0+c*r__stride_1] = 0; - } -} - -void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension) -{ - THTensor* inputs[2]; - inputs[0] = ta; - inputs[1] = tb; - THTensor_(catArray)(r_, inputs, 2, dimension); -} - -void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension) -{ - THLongStorage *size; - int i, j; - long offset; - int maxDim = dimension + 1; - int allEmpty = 1; - int allContiguous = 1; - - // cat_dimension is the actual dimension we cat along - int cat_dimension = dimension; - - for (i = 0; i < numInputs; i++) - { - maxDim = THMax(maxDim, inputs[i]->nDimension); - } - - // When the user input dimension is -1 (i.e. -2 in C) - // Then we pick the maximum last dimension across all tensors. - if ( dimension + TH_INDEX_BASE == -1 ) - { - cat_dimension = maxDim?(maxDim-1):0; - } - - THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); - THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE); - - size = THLongStorage_newWithSize(maxDim); - - for(i = 0; i < maxDim; i++) - { - // dimSize is either the size of the dim if it exists, either 1 if #dim > 0, otherwise 0 - long dimSize = i < inputs[0]->nDimension ? inputs[0]->size[i] : THMin(inputs[0]->nDimension, 1); - if (i == cat_dimension) - { - for (j = 1; j < numInputs; j++) - { - // accumulate the size over the dimension we want to cat on. - // Empty tensors are allowed - dimSize += i < inputs[j]->nDimension ? inputs[j]->size[i] : THMin(inputs[j]->nDimension, 1); - } - } - else - { - for (j = 1; j < numInputs; j++) - { - long sz = (i < inputs[j]->nDimension ? 
inputs[j]->size[i] : THMin(inputs[j]->nDimension, 1)); - // If it's a dimension we're not catting on - // Then fail if sizes are different AND > 0 - if (dimSize != sz && dimSize && sz) - { - THLongStorage_free(size); - THError("inconsistent tensor sizes"); - } - else if(!dimSize) - { - dimSize = sz; - } - } - } - allEmpty = allEmpty && !dimSize; - size->data[i] = dimSize; - } - - // Initiate catting and resizing - // If at least one of the input is not empty - if (!allEmpty) - { - THTensor_(resize)(result, size, NULL); - - // Check contiguity of all inputs and result - for (i = 0; i < numInputs; i++) { - if(inputs[i]->nDimension) { - allContiguous = allContiguous && THTensor_(isContiguous)(inputs[i]); - } - } - allContiguous = allContiguous && THTensor_(isContiguous)(result); - - // First path is for contiguous inputs along dim 1 - // Second path for non-contiguous - if (cat_dimension == 0 && allContiguous) - { - real* result_data = result->storage->data + result->storageOffset; - offset = 0; - for (j = 0; j < numInputs; j++) - { - if (inputs[j]->nDimension) - { - THTensor* input0 = inputs[j]; - real* input0_data = input0->storage->data + input0->storageOffset; - long input0_size = THTensor_(nElement)(input0); - memcpy(result_data + offset, input0_data, input0_size*sizeof(real)); - offset += input0_size; - } - } - } - else - { - offset = 0; - for (j = 0; j < numInputs; j++) - { - if (inputs[j]->nDimension) - { - long dimSize = cat_dimension < inputs[j]->nDimension ? inputs[j]->size[cat_dimension] : 1; - THTensor *nt = THTensor_(newWithTensor)(result); - THTensor_(narrow)(nt, NULL, cat_dimension, offset, dimSize); - THTensor_(copy)(nt, inputs[j]); - THTensor_(free)(nt); - offset += dimSize; - } - } - } - } - THLongStorage_free(size); -} - -int THTensor_(equal)(THTensor *ta, THTensor* tb) -{ - int equal = 1; - if(!THTensor_(isSameSizeAs)(ta, tb)) - return 0; - - if (THTensor_(isContiguous)(ta) && THTensor_(isContiguous)(tb)) { - real *tap = THTensor_(data)(ta); - real *tbp = THTensor_(data)(tb); - ptrdiff_t sz = THTensor_(nElement)(ta); - ptrdiff_t i; - for (i=0; inDimension, t->size, NULL); \ - TH_TENSOR_APPLY2(unsigned char, r_, real, t, \ - *r__data = (*t_data OP value) ? 1 : 0;); \ - } \ - void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \ - { \ - THTensor_(resizeNd)(r_, t->nDimension, t->size, NULL); \ - TH_TENSOR_APPLY2(real, r_, real, t, \ - *r__data = (*t_data OP value) ? 1 : 0;); \ - } \ - void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \ - { \ - THByteTensor_resizeNd(r_, ta->nDimension, ta->size, NULL); \ - TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \ - *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \ - } \ - void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \ - { \ - THTensor_(resizeNd)(r_, ta->nDimension, ta->size, NULL); \ - TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \ - *r__data = (*ta_data OP *tb_data) ? 
1 : 0;); \ - } \ - - -TENSOR_IMPLEMENT_LOGICAL(lt,<) -TENSOR_IMPLEMENT_LOGICAL(gt,>) -TENSOR_IMPLEMENT_LOGICAL(le,<=) -TENSOR_IMPLEMENT_LOGICAL(ge,>=) -TENSOR_IMPLEMENT_LOGICAL(eq,==) -TENSOR_IMPLEMENT_LOGICAL(ne,!=) - -#define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \ - void THTensor_(NAME)(THTensor *r_, THTensor *t) \ - { \ - THTensor_(resizeAs)(r_, t); \ - TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \ - } \ - -#define LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(NAME, CFUNC) \ - void THTensor_(NAME)(THTensor *r_, THTensor *t, real value) \ - { \ - THTensor_(resizeAs)(r_, t); \ - TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data, value);); \ - } \ - -#if defined(TH_REAL_IS_LONG) -LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs) -#endif /* long only part */ - -#if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT) -LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs) -#endif /* int only part */ - -#if defined(TH_REAL_IS_BYTE) - -#define TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \ - int THTensor_(NAME)(THTensor *tensor) \ - { \ - THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); \ - int sum = INIT_VALUE; \ - TH_TENSOR_APPLY(real, tensor, sum = sum OP *tensor_data;); \ - return sum; \ - } - -TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &&, 1) -TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, ||, 0) - -#endif /* Byte only part */ - -/* floating point only now */ -#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) - -#if defined (TH_REAL_IS_FLOAT) -#define TH_MATH_NAME(fn) fn##f -#else -#define TH_MATH_NAME(fn) fn -#endif - -LAB_IMPLEMENT_BASIC_FUNCTION(log,TH_MATH_NAME(log)) -LAB_IMPLEMENT_BASIC_FUNCTION(lgamma,TH_MATH_NAME(lgamma)) -LAB_IMPLEMENT_BASIC_FUNCTION(log1p,TH_MATH_NAME(log1p)) -LAB_IMPLEMENT_BASIC_FUNCTION(sigmoid,TH_MATH_NAME(TH_sigmoid)) -LAB_IMPLEMENT_BASIC_FUNCTION(exp,TH_MATH_NAME(exp)) -LAB_IMPLEMENT_BASIC_FUNCTION(cos,TH_MATH_NAME(cos)) -LAB_IMPLEMENT_BASIC_FUNCTION(acos,TH_MATH_NAME(acos)) -LAB_IMPLEMENT_BASIC_FUNCTION(cosh,TH_MATH_NAME(cosh)) -LAB_IMPLEMENT_BASIC_FUNCTION(sin,TH_MATH_NAME(sin)) -LAB_IMPLEMENT_BASIC_FUNCTION(asin,TH_MATH_NAME(asin)) -LAB_IMPLEMENT_BASIC_FUNCTION(sinh,TH_MATH_NAME(sinh)) -LAB_IMPLEMENT_BASIC_FUNCTION(tan,TH_MATH_NAME(tan)) -LAB_IMPLEMENT_BASIC_FUNCTION(atan,TH_MATH_NAME(atan)) -LAB_IMPLEMENT_BASIC_FUNCTION(tanh,TH_MATH_NAME(tanh)) -LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(pow,TH_MATH_NAME(pow)) -LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,TH_MATH_NAME(sqrt)) -LAB_IMPLEMENT_BASIC_FUNCTION(rsqrt,TH_MATH_NAME(TH_rsqrt)) -LAB_IMPLEMENT_BASIC_FUNCTION(ceil,TH_MATH_NAME(ceil)) -LAB_IMPLEMENT_BASIC_FUNCTION(floor,TH_MATH_NAME(floor)) -LAB_IMPLEMENT_BASIC_FUNCTION(round,TH_MATH_NAME(round)) -LAB_IMPLEMENT_BASIC_FUNCTION(abs,TH_MATH_NAME(fabs)) -LAB_IMPLEMENT_BASIC_FUNCTION(trunc,TH_MATH_NAME(trunc)) -LAB_IMPLEMENT_BASIC_FUNCTION(frac,TH_MATH_NAME(TH_frac)) -LAB_IMPLEMENT_BASIC_FUNCTION(neg,-) -LAB_IMPLEMENT_BASIC_FUNCTION(cinv, TH_MATH_NAME(1.0) / ) - - -void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty) -{ - THTensor_(resizeAs)(r_, tx); - TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = TH_MATH_NAME(atan2)(*tx_data,*ty_data);); -} - -void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight) -{ - THArgCheck(THTensor_(nElement)(a) == THTensor_(nElement)(b), 2, "sizes do not match"); - THTensor_(resizeAs)(r_, a); - TH_TENSOR_APPLY3(real, r_, real, a, real, b, *r__data = TH_MATH_NAME(TH_lerp)(*a_data, *b_data, weight);); -} - -void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int keepdim) -{ - 
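/* mean() below is implemented as sum()/size, and std()/var() share one
 * accumulation pass with a `flag` argument selecting the estimator. A
 * sketch of the two branches over n = t_size values:
 *
 *   flag != 0 (biased):    var = E[x^2] - E[x]^2                  (divide by n)
 *   flag == 0 (unbiased):  var = (sum(x^2) - n*mean^2) / (n - 1)  (divide by n-1)
 *
 * std() is the square root of the chosen variance, clamped at 0 to absorb
 * the small negative values this one-pass formula can produce. */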
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d", - dimension + TH_INDEX_BASE); - - THTensor_(sum)(r_, t, dimension, keepdim); - THTensor_(div)(r_, r_, t->size[dimension]); -} - -void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int flag, int keepdim) -{ - THLongStorage *dim; - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", - dimension + TH_INDEX_BASE); - - dim = THTensor_(newSizeOf)(t); - THLongStorage_set(dim, dimension, 1); - THTensor_(resize)(r_, dim, NULL); - THLongStorage_free(dim); - - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, - accreal sum = 0; - accreal sum2 = 0; - long i; - for(i = 0; i < t_size; i++) - { - real z = t_data[i*t_stride]; - sum += z; - sum2 += z*z; - } - - if(flag) - { - sum /= t_size; - sum2 /= t_size; - sum2 -= sum*sum; - sum2 = (sum2 < 0 ? 0 : sum2); - *r__data = (real)TH_MATH_NAME(sqrt)(sum2); - } - else - { - sum /= t_size; - sum2 /= t_size-1; - sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum; - sum2 = (sum2 < 0 ? 0 : sum2); - *r__data = (real)TH_MATH_NAME(sqrt)(sum2); - }); - - if (!keepdim) { - THTensor_(squeeze1d)(r_, r_, dimension); - } -} - -void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int flag, int keepdim) -{ - THLongStorage *dim; - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", - dimension + TH_INDEX_BASE); - - dim = THTensor_(newSizeOf)(t); - THLongStorage_set(dim, dimension, 1); - THTensor_(resize)(r_, dim, NULL); - THLongStorage_free(dim); - - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, - accreal sum = 0; - accreal sum2 = 0; - long i; - for(i = 0; i < t_size; i++) - { - real z = t_data[i*t_stride]; - sum += z; - sum2 += z*z; - } - - if(flag) - { - sum /= t_size; - sum2 /= t_size; - sum2 -= sum*sum; - sum2 = (sum2 < 0 ? 0 : sum2); - *r__data = sum2; - } - else - { - sum /= t_size; - sum2 /= t_size-1; - sum2 -= ((real)t_size)/((real)(t_size-1))*sum*sum; - sum2 = (sum2 < 0 ? 
0 : sum2); - *r__data = (real)sum2; - }); - - if (!keepdim) { - THTensor_(squeeze1d)(r_, r_, dimension); - } -} - -void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int keepdim) -{ - THLongStorage *dim; - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", - dimension + TH_INDEX_BASE); - - dim = THTensor_(newSizeOf)(t); - THLongStorage_set(dim, dimension, 1); - THTensor_(resize)(r_, dim, NULL); - THLongStorage_free(dim); - - if(value == 0) { - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, - accreal sum = 0; - long i; - for(i = 0; i < t_size; i++) - sum += t_data[i*t_stride] != 0.0; - *r__data = sum;) - } else { - TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, - accreal sum = 0; - long i; - for(i = 0; i < t_size; i++) { - sum += TH_MATH_NAME(pow)( - TH_MATH_NAME(fabs)(t_data[i*t_stride]), value); - } - *r__data = TH_MATH_NAME(pow)(sum, 1.0/value);) - } - - if (!keepdim) { - THTensor_(squeeze1d)(r_, r_, dimension); - } -} - -accreal THTensor_(normall)(THTensor *tensor, real value) -{ - accreal sum = 0; - if(value == 0) { - TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;); - return sum; - } else if(value == 1) { - TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(fabs)(*tensor_data);); - return sum; - } else if(value == 2) { - TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;); - return sqrt(sum); - } else { - TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*tensor_data), value);); - return TH_MATH_NAME(pow)(sum, 1.0/value); - } -} - -void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm) -{ - int i; - THTensor *rowR, *rowS; - - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d", - dimension + TH_INDEX_BASE); - THArgCheck(value > 0, 2, "non-positive-norm not supported"); - THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions, got %d dimensions", - THTensor_(nDimension)(src)); - - rowR = THTensor_(new)(); - rowS = THTensor_(new)(); - - THTensor_(resizeAs)(res, src); - - for (i=0; i<src->size[dimension]; i++) - { - real norm = 0; - real new_norm; - - THTensor_(select)(rowS, src, dimension, i); - THTensor_(select)(rowR, res, dimension, i); - if (value == 1) { - TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data);); - } else if (value == 2) { - TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;); - } else { - TH_TENSOR_APPLY(real, rowS, norm += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*rowS_data), value);); - } - - norm = pow(norm, 1/value); - - if (norm > maxnorm) - { - new_norm = maxnorm / (norm + 1e-7); - - TH_TENSOR_APPLY2( - real, rowR, real, rowS, - *rowR_data = (*rowS_data) * new_norm; - ) - } - else - THTensor_(copy)(rowR, rowS); - } - - THTensor_(free)(rowR); - THTensor_(free)(rowS); -} - -accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value) -{ - real sum = 0; - TH_TENSOR_APPLY2(real, tensor, real, src, - sum += TH_MATH_NAME(pow)( - TH_MATH_NAME(fabs)(*tensor_data - *src_data), value);); - return TH_MATH_NAME(pow)(sum, 1.0/value); -} - -accreal THTensor_(meanall)(THTensor *tensor) -{ - THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); - return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor); -} - -accreal THTensor_(varall)(THTensor *tensor) -{ - accreal mean = THTensor_(meanall)(tensor); - accreal sum = 0; - TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean);); - sum /= 
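[Editor's note: the deleted std/var kernels accumulate sum and sum-of-squares in a single pass, then apply either the biased estimator (flag set: divide by N) or the Bessel-corrected one (divide by N-1), clamping negative rounding residue to zero. The same arithmetic in isolation; a sketch with illustrative names.]

    #include <stdio.h>

    /* One-pass variance mirroring the flag logic above:
       flag != 0 -> biased:    E[x^2] - E[x]^2
       flag == 0 -> unbiased:  sum2/(n-1) - n/(n-1) * mean^2 */
    static double variance(const double *x, long n, int flag)
    {
        double sum = 0.0, sum2 = 0.0;
        for (long i = 0; i < n; i++) {
            sum  += x[i];
            sum2 += x[i] * x[i];
        }
        if (flag) {
            sum  /= n;
            sum2 /= n;
            sum2 -= sum * sum;
        } else {
            sum  /= n;
            sum2 /= n - 1;
            sum2 -= ((double)n / (n - 1)) * sum * sum;
        }
        return sum2 < 0 ? 0 : sum2;   /* clamp rounding residue */
    }

    int main(void)
    {
        double x[4] = {1, 2, 3, 4};
        printf("biased %g unbiased %g\n", variance(x, 4, 1), variance(x, 4, 0));
        /* biased 1.25, unbiased ~1.66667 */
        return 0;
    }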
(THTensor_(nElement)(tensor)-1); - return sum; -} - -accreal THTensor_(stdall)(THTensor *tensor) -{ - return sqrt(THTensor_(varall)(tensor)); -} - -void THTensor_(linspace)(THTensor *r_, real a, real b, long n) -{ - real i = 0; - - THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); - - if (THTensor_(nElement)(r_) != n) { - THTensor_(resize1d)(r_, n); - } - - if(n == 1) { - TH_TENSOR_APPLY(real, r_, - *r__data = a; - i++; - ); - } else { - TH_TENSOR_APPLY(real, r_, - *r__data = a + i*(b-a)/((real)(n-1)); - i++; - ); - } -} - -void THTensor_(logspace)(THTensor *r_, real a, real b, long n) -{ - real i = 0; - - THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); - - if (THTensor_(nElement)(r_) != n) { - THTensor_(resize1d)(r_, n); - } - - if(n == 1) { - TH_TENSOR_APPLY(real, r_, - *r__data = TH_MATH_NAME(pow)(10.0, a); - i++; - ); - } else { - TH_TENSOR_APPLY(real, r_, - *r__data = TH_MATH_NAME(pow)(10.0, a + i*(b-a)/((real)(n-1))); - i++; - ); - } -} - -void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size) -{ - THTensor_(resize)(r_, size, NULL); - THTensor_(uniform)(r_, _generator, 0, 1); -} - -void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size) -{ - THTensor_(resize)(r_, size, NULL); - THTensor_(normal)(r_, _generator, 0, 1); -} - -void THTensor_(histc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue) -{ - real minval; - real maxval; - real *h_data; - - THTensor_(resize1d)(hist, nbins); - THTensor_(zero)(hist); - minval = minvalue; - maxval = maxvalue; - if (minval == maxval) - { - minval = THTensor_(minall)(tensor); - maxval = THTensor_(maxall)(tensor); - } - if (minval == maxval) - { - minval = minval - 1; - maxval = maxval + 1; - } - - h_data = THTensor_(data)(hist); - - TH_TENSOR_APPLY(real, tensor, - if (*tensor_data >= minval && *tensor_data <= maxval) { - const int bin = (int)((*tensor_data-minval) / (maxval-minval) * nbins); - h_data[THMin(bin, nbins-1)] += 1; - } - ); -} - -void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue) -{ - THArgCheck(THTensor_(nDimension)(tensor) < 3, 2, "invalid dimension %d, the input must be a 2d tensor", THTensor_(nDimension)(tensor)); - - int dimension = 1; - THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(tensor), 2, "invalid dimension %d", - dimension + TH_INDEX_BASE); - - real minval; - real maxval; - real *h_data; - - THTensor_(resize2d)(hist, tensor->size[0], nbins); - THTensor_(zero)(hist); - - minval = minvalue; - maxval = maxvalue; - if (minval == maxval) - { - minval = THTensor_(minall)(tensor); - maxval = THTensor_(maxall)(tensor); - } - if (minval == maxval) - { - minval = minval - 1; - maxval = maxval + 1; - } - - TH_TENSOR_DIM_APPLY2(real, tensor, real, hist, dimension, long i; - for(i = 0; i < tensor_size; i++) - { - if(tensor_data[i*tensor_stride] >= minval && tensor_data[i*tensor_stride] <= maxval) { - const int bin = (int)((tensor_data[i*tensor_stride]-minval) / (maxval-minval) * nbins); - hist_data[THMin(bin, nbins-1)] += 1; - } - } - ); -} - -#undef TH_MATH_NAME -#endif /* floating point only part */ -#undef IS_NONZERO -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensorMath.h b/contrib/lua-torch/torch7/lib/TH/generic/THTensorMath.h deleted file mode 100644 index 17e54ccf6b..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THTensorMath.h +++ /dev/null @@ -1,198 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE 
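[Editor's note: histc above computes each bin as (x - min) / (max - min) * nbins and clamps into the last bin so that x == max is still counted; when minvalue == maxvalue it falls back to the data range, widened by one on each side for constant input. The binning rule on its own; a standalone sketch with hypothetical names.]

    #include <stdio.h>

    /* Histogram with the same bin formula and edge clamp as histc. */
    static void histogram(long *hist, long nbins, const float *x, long n,
                          float minval, float maxval)
    {
        for (long i = 0; i < nbins; i++) hist[i] = 0;
        for (long i = 0; i < n; i++) {
            if (x[i] >= minval && x[i] <= maxval) {
                long bin = (long)((x[i] - minval) / (maxval - minval) * nbins);
                if (bin > nbins - 1) bin = nbins - 1;  /* x == maxval lands here */
                hist[bin]++;
            }
        }
    }

    int main(void)
    {
        float x[6] = {0.0f, 0.1f, 0.5f, 0.9f, 1.0f, 2.0f};
        long hist[2];
        histogram(hist, 2, x, 6, 0.0f, 1.0f);   /* 2.0 is out of range */
        printf("%ld %ld\n", hist[0], hist[1]);  /* 2 3 */
        return 0;
    }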
"generic/THTensorMath.h" -#else - -TH_API void THTensor_(fill)(THTensor *r_, real value); -TH_API void THTensor_(zero)(THTensor *r_); - -TH_API void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value); -TH_API void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src); -TH_API void THTensor_(maskedSelect)(THTensor *tensor, THTensor* src, THByteTensor *mask); - -TH_API void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor); - -TH_API void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index); -TH_API void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src); -TH_API void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src); -TH_API void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val); - -TH_API void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index); -TH_API void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src); -TH_API void THTensor_(scatterAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src); -TH_API void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val); - -TH_API accreal THTensor_(dot)(THTensor *t, THTensor *src); - -TH_API real THTensor_(minall)(THTensor *t); -TH_API real THTensor_(maxall)(THTensor *t); -TH_API real THTensor_(medianall)(THTensor *t); -TH_API accreal THTensor_(sumall)(THTensor *t); -TH_API accreal THTensor_(prodall)(THTensor *t); - -TH_API void THTensor_(neg)(THTensor *self, THTensor *src); -TH_API void THTensor_(cinv)(THTensor *self, THTensor *src); - -TH_API void THTensor_(add)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(sub)(THTensor *self, THTensor *src, real value); -TH_API void THTensor_(mul)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(div)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(lshift)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(rshift)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(fmod)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(remainder)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value); -TH_API void THTensor_(bitand)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(bitor)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(bitxor)(THTensor *r_, THTensor *t, real value); - -TH_API void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src); -TH_API void THTensor_(csub)(THTensor *self, THTensor *src1, real value, THTensor *src2); -TH_API void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src); -TH_API void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src); -TH_API void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src); -TH_API void THTensor_(clshift)(THTensor *r_, THTensor *t, THTensor *src); -TH_API void THTensor_(crshift)(THTensor *r_, THTensor *t, THTensor *src); -TH_API void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src); -TH_API void THTensor_(cremainder)(THTensor *r_, THTensor *t, THTensor *src); -TH_API void THTensor_(cbitand)(THTensor *r_, THTensor *t, THTensor *src); -TH_API void THTensor_(cbitor)(THTensor *r_, THTensor *t, THTensor *src); -TH_API void THTensor_(cbitxor)(THTensor *r_, THTensor *t, THTensor *src); - -TH_API void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, 
THTensor *src2); -TH_API void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2); - -TH_API void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec); -TH_API void THTensor_(addmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat1, THTensor *mat2); -TH_API void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2); - -TH_API void THTensor_(addbmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2); -TH_API void THTensor_(baddbmm)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2); - -TH_API void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain); - -TH_API ptrdiff_t THTensor_(numel)(THTensor *t); -TH_API void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim); -TH_API void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim); -TH_API void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, long k, int dimension, int keepdim); -TH_API void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim); -TH_API void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim); -TH_API void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim); -TH_API void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim); -TH_API void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension); -TH_API void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension); -TH_API void THTensor_(sign)(THTensor *r_, THTensor *t); -TH_API accreal THTensor_(trace)(THTensor *t); -TH_API void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension); - -TH_API void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src); -TH_API void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src); -TH_API void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value); -TH_API void THTensor_(cminValue)(THTensor *r, THTensor *t, real value); - -TH_API void THTensor_(zeros)(THTensor *r_, THLongStorage *size); -TH_API void THTensor_(ones)(THTensor *r_, THLongStorage *size); -TH_API void THTensor_(diag)(THTensor *r_, THTensor *t, int k); -TH_API void THTensor_(eye)(THTensor *r_, long n, long m); -TH_API void THTensor_(arange)(THTensor *r_, accreal xmin, accreal xmax, accreal step); -TH_API void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step); -TH_API void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, long n); - -TH_API void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size); -TH_API void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder); -TH_API void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, long k, int dim, int dir, int sorted); -TH_API void THTensor_(tril)(THTensor *r_, THTensor *t, long k); -TH_API void THTensor_(triu)(THTensor *r_, THTensor *t, long k); -TH_API void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension); -TH_API void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension); - -TH_API int THTensor_(equal)(THTensor *ta, THTensor *tb); - -TH_API void THTensor_(ltValue)(THByteTensor *r_, THTensor* t, real value); -TH_API void THTensor_(leValue)(THByteTensor *r_, THTensor* 
t, real value); -TH_API void THTensor_(gtValue)(THByteTensor *r_, THTensor* t, real value); -TH_API void THTensor_(geValue)(THByteTensor *r_, THTensor* t, real value); -TH_API void THTensor_(neValue)(THByteTensor *r_, THTensor* t, real value); -TH_API void THTensor_(eqValue)(THByteTensor *r_, THTensor* t, real value); - -TH_API void THTensor_(ltValueT)(THTensor *r_, THTensor* t, real value); -TH_API void THTensor_(leValueT)(THTensor *r_, THTensor* t, real value); -TH_API void THTensor_(gtValueT)(THTensor *r_, THTensor* t, real value); -TH_API void THTensor_(geValueT)(THTensor *r_, THTensor* t, real value); -TH_API void THTensor_(neValueT)(THTensor *r_, THTensor* t, real value); -TH_API void THTensor_(eqValueT)(THTensor *r_, THTensor* t, real value); - -TH_API void THTensor_(ltTensor)(THByteTensor *r_, THTensor *ta, THTensor *tb); -TH_API void THTensor_(leTensor)(THByteTensor *r_, THTensor *ta, THTensor *tb); -TH_API void THTensor_(gtTensor)(THByteTensor *r_, THTensor *ta, THTensor *tb); -TH_API void THTensor_(geTensor)(THByteTensor *r_, THTensor *ta, THTensor *tb); -TH_API void THTensor_(neTensor)(THByteTensor *r_, THTensor *ta, THTensor *tb); -TH_API void THTensor_(eqTensor)(THByteTensor *r_, THTensor *ta, THTensor *tb); - -TH_API void THTensor_(ltTensorT)(THTensor *r_, THTensor *ta, THTensor *tb); -TH_API void THTensor_(leTensorT)(THTensor *r_, THTensor *ta, THTensor *tb); -TH_API void THTensor_(gtTensorT)(THTensor *r_, THTensor *ta, THTensor *tb); -TH_API void THTensor_(geTensorT)(THTensor *r_, THTensor *ta, THTensor *tb); -TH_API void THTensor_(neTensorT)(THTensor *r_, THTensor *ta, THTensor *tb); -TH_API void THTensor_(eqTensorT)(THTensor *r_, THTensor *ta, THTensor *tb); - -#if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT) || defined(TH_REAL_IS_LONG) -TH_API void THTensor_(abs)(THTensor *r_, THTensor *t); -#endif - -#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) - -TH_API void THTensor_(sigmoid)(THTensor *r_, THTensor *t); -TH_API void THTensor_(log)(THTensor *r_, THTensor *t); -TH_API void THTensor_(lgamma)(THTensor *r_, THTensor *t); -TH_API void THTensor_(log1p)(THTensor *r_, THTensor *t); -TH_API void THTensor_(exp)(THTensor *r_, THTensor *t); -TH_API void THTensor_(cos)(THTensor *r_, THTensor *t); -TH_API void THTensor_(acos)(THTensor *r_, THTensor *t); -TH_API void THTensor_(cosh)(THTensor *r_, THTensor *t); -TH_API void THTensor_(sin)(THTensor *r_, THTensor *t); -TH_API void THTensor_(asin)(THTensor *r_, THTensor *t); -TH_API void THTensor_(sinh)(THTensor *r_, THTensor *t); -TH_API void THTensor_(tan)(THTensor *r_, THTensor *t); -TH_API void THTensor_(atan)(THTensor *r_, THTensor *t); -TH_API void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty); -TH_API void THTensor_(tanh)(THTensor *r_, THTensor *t); -TH_API void THTensor_(pow)(THTensor *r_, THTensor *t, real value); -TH_API void THTensor_(tpow)(THTensor *r_, real value, THTensor *t); -TH_API void THTensor_(sqrt)(THTensor *r_, THTensor *t); -TH_API void THTensor_(rsqrt)(THTensor *r_, THTensor *t); -TH_API void THTensor_(ceil)(THTensor *r_, THTensor *t); -TH_API void THTensor_(floor)(THTensor *r_, THTensor *t); -TH_API void THTensor_(round)(THTensor *r_, THTensor *t); -TH_API void THTensor_(abs)(THTensor *r_, THTensor *t); -TH_API void THTensor_(trunc)(THTensor *r_, THTensor *t); -TH_API void THTensor_(frac)(THTensor *r_, THTensor *t); -TH_API void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight); - -TH_API void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int 
keepdim); -TH_API void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int flag, int keepdim); -TH_API void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int flag, int keepdim); -TH_API void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int keepdim); -TH_API void THTensor_(renorm)(THTensor *r_, THTensor *t, real value, int dimension, real maxnorm); -TH_API accreal THTensor_(dist)(THTensor *a, THTensor *b, real value); -TH_API void THTensor_(histc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue); -TH_API void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, long nbins, real minvalue, real maxvalue); - -TH_API accreal THTensor_(meanall)(THTensor *self); -TH_API accreal THTensor_(varall)(THTensor *self); -TH_API accreal THTensor_(stdall)(THTensor *self); -TH_API accreal THTensor_(normall)(THTensor *t, real value); - -TH_API void THTensor_(linspace)(THTensor *r_, real a, real b, long n); -TH_API void THTensor_(logspace)(THTensor *r_, real a, real b, long n); -TH_API void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size); -TH_API void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size); -#endif - -#if defined(TH_REAL_IS_BYTE) - -TH_API int THTensor_(logicalall)(THTensor *self); -TH_API int THTensor_(logicalany)(THTensor *self); - -#endif /* TH_REAL_IS_BYTE */ - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensorRandom.c b/contrib/lua-torch/torch7/lib/TH/generic/THTensorRandom.c deleted file mode 100644 index 514d3dd278..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THTensorRandom.c +++ /dev/null @@ -1,250 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THTensorRandom.c" -#else - -void THTensor_(random)(THTensor *self, THGenerator *_generator) -{ -#if defined(TH_REAL_IS_BYTE) - TH_TENSOR_APPLY(real, self, *self_data = (unsigned char)(THRandom_random(_generator) % (UCHAR_MAX+1));); -#elif defined(TH_REAL_IS_CHAR) - TH_TENSOR_APPLY(real, self, *self_data = (char)(THRandom_random(_generator) % (CHAR_MAX+1));); -#elif defined(TH_REAL_IS_SHORT) - TH_TENSOR_APPLY(real, self, *self_data = (short)(THRandom_random(_generator) % (SHRT_MAX+1));); -#elif defined(TH_REAL_IS_INT) - TH_TENSOR_APPLY(real, self, *self_data = (int)(THRandom_random(_generator) % (INT_MAX+1UL));); -#elif defined(TH_REAL_IS_LONG) - TH_TENSOR_APPLY(real, self, *self_data = (long)(THRandom_random(_generator) % (LONG_MAX+1UL));); -#elif defined(TH_REAL_IS_FLOAT) - TH_TENSOR_APPLY(real, self, *self_data = (float)(THRandom_random(_generator) % ((1UL << FLT_MANT_DIG)+1));); -#elif defined(TH_REAL_IS_DOUBLE) - TH_TENSOR_APPLY(real, self, *self_data = (double)(THRandom_random(_generator) % ((1ULL << DBL_MANT_DIG)+1));); -#else -#error "Unknown type" -#endif -} - -void THTensor_(geometric)(THTensor *self, THGenerator *_generator, double p) -{ - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_geometric(_generator, p);); -} - -void THTensor_(bernoulli)(THTensor *self, THGenerator *_generator, double p) -{ - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_bernoulli(_generator, p);); -} - -void THTensor_(bernoulli_FloatTensor)(THTensor *self, THGenerator *_generator, THFloatTensor *p) -{ - TH_TENSOR_APPLY2(real, self, float, p, *self_data = (real)THRandom_bernoulli(_generator, (double)*p_data);); -} - -void THTensor_(bernoulli_DoubleTensor)(THTensor *self, THGenerator *_generator, THDoubleTensor *p) -{ - TH_TENSOR_APPLY2(real, self, double, p, *self_data = 
(real)THRandom_bernoulli(_generator, (double)*p_data);); -} - -#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) - -void THTensor_(uniform)(THTensor *self, THGenerator *_generator, double a, double b) -{ - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_uniform(_generator, a, b);); -} - -void THTensor_(normal)(THTensor *self, THGenerator *_generator, double mean, double stdv) -{ - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_normal(_generator, mean, stdv);); -} - -void THTensor_(exponential)(THTensor *self, THGenerator *_generator, double lambda) -{ - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_exponential(_generator, lambda);); -} - -void THTensor_(cauchy)(THTensor *self, THGenerator *_generator, double median, double sigma) -{ - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_cauchy(_generator, median, sigma);); -} - -void THTensor_(logNormal)(THTensor *self, THGenerator *_generator, double mean, double stdv) -{ - TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_logNormal(_generator, mean, stdv);); -} - -void THTensor_(multinomial)(THLongTensor *self, THGenerator *_generator, THTensor *prob_dist, int n_sample, int with_replacement) -{ - int start_dim = THTensor_(nDimension)(prob_dist); - long n_dist; - long n_categories; - THDoubleTensor* cum_dist; - int i,j,k; - - if (start_dim == 1) - { - THTensor_(resize2d)(prob_dist, 1, THTensor_(size)(prob_dist, 0)); - } - - n_dist = THTensor_(size)(prob_dist, 0); - n_categories = THTensor_(size)(prob_dist, 1); - - THArgCheck(n_sample > 0, 2, "cannot sample n_sample < 0 samples"); - - if (!with_replacement) - { - THArgCheck((!with_replacement) && (n_sample <= n_categories), 2, \ - "cannot sample n_sample > prob_dist:size(1) samples without replacement"); - } - - /* cumulative probability distribution vector */ - cum_dist = THDoubleTensor_newWithSize1d(n_categories); - - /* will contain multinomial samples (category indices to be returned) */ - THLongTensor_resize2d(self, n_dist , n_sample); - - for (i=0; i<n_dist; i++) - { - /* Get normalized cumulative distribution from prob distribution */ - double sum = 0; - for (j=0; j<n_categories; j++) - { - sum += THStorage_(get)( \ - prob_dist->storage, \ - prob_dist->storageOffset+i*prob_dist->stride[0]+j*prob_dist->stride[1] \ - ); - THDoubleStorage_set( - cum_dist->storage, \ - cum_dist->storageOffset+j*cum_dist->stride[0], \ - sum \ - ); - } - THArgCheckWithCleanup((sum > 0), THCleanup(THDoubleTensor_free(cum_dist);), 2, - "invalid multinomial distribution (sum of probabilities <= 0)"); - /* normalize cumulative probability distribution so that last val is 1 - i.e. doesn't assume original prob_dist row sums to one */ - if ( (sum > 0) || ( ( sum < 1.00001) && (sum > 0.99999) ) ) - { - for (j=0; j<n_categories; j++) - { - THDoubleTensor_data(cum_dist)[j*cum_dist->stride[0]] /= sum; - } - } - - for (j=0; j<n_sample; j++) - { - /* sample a probability mass from a uniform distribution */ - double uniform_sample = THRandom_uniform(_generator, 0, 1); - /* Do a binary search for the slot in which the prob falls - ie cum_dist[row][slot-1] < uniform_sample < cum_dist[row][slot] */ - int left_pointer = 0; - int right_pointer = n_categories; - int mid_pointer; - double cum_prob; - int sample_idx; - /* Make the final slot cum_dist[row][n_categories-1] = 1, to fix numerical issues */ - THDoubleTensor_data(cum_dist)[(n_categories-1)*cum_dist->stride[0]] = 1; - - while(right_pointer - left_pointer > 0) - { - mid_pointer = left_pointer + (right_pointer - left_pointer) / 2; - cum_prob = THDoubleStorage_get( \ - cum_dist->storage, \ - cum_dist->storageOffset+mid_pointer*cum_dist->stride[0] \ - ); - if (cum_prob < uniform_sample) - { - left_pointer = mid_pointer + 1; - } - else - { - right_pointer = mid_pointer; - } - } - sample_idx = left_pointer; - - /* store in result tensor (will be incremented for lua compat by wrapper) */ - THLongStorage_set( \ - self->storage, \ - self->storageOffset+i*self->stride[0]+j*self->stride[1], \ - sample_idx \ - ); - - /* Once a sample is drawn, it cannot be drawn again. 
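[Editor's note: to summarize the sampler above: build the per-row cumulative distribution, force the last slot to exactly 1 to absorb rounding error, then binary-search each uniform draw for the first slot whose cumulative mass reaches it; the without-replacement update continues below. A self-contained with-replacement sketch using rand(), illustrative only and not the TH RNG.]

    #include <stdio.h>
    #include <stdlib.h>

    /* Draw one index from a discrete distribution: cumulative sums,
       last slot pinned to 1, then binary search of a uniform sample. */
    static int sample_multinomial(const double *prob, int n_categories,
                                  double *cum /* scratch, n_categories */)
    {
        double sum = 0.0;
        for (int j = 0; j < n_categories; j++) {
            sum += prob[j];
            cum[j] = sum;
        }
        for (int j = 0; j < n_categories; j++)
            cum[j] /= sum;                 /* normalize; cum[n-1] ~= 1 */
        cum[n_categories - 1] = 1.0;       /* pin against rounding */

        double u = (double)rand() / ((double)RAND_MAX + 1.0);
        int left = 0, right = n_categories; /* first slot with cum >= u */
        while (right - left > 0) {
            int mid = left + (right - left) / 2;
            if (cum[mid] < u) left = mid + 1; else right = mid;
        }
        return left;
    }

    int main(void)
    {
        double prob[3] = {0.1, 0.7, 0.2}, cum[3];
        int counts[3] = {0, 0, 0};
        for (int i = 0; i < 10000; i++)
            counts[sample_multinomial(prob, 3, cum)]++;
        printf("%d %d %d\n", counts[0], counts[1], counts[2]);
        /* roughly 1000 7000 2000 */
        return 0;
    }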
ie sample without replacement */ - if (!with_replacement) - { - /* update cumulative distribution so that sample cannot be drawn again */ - double diff; - double new_val = 0; - double sum; - - if (sample_idx != 0) - { - new_val = THDoubleStorage_get( \ - cum_dist->storage, \ - cum_dist->storageOffset+(sample_idx-1)*cum_dist->stride[0] \ - ); - } - /* marginal cumulative mass (i.e. original probability) of sample */ - diff = THDoubleStorage_get( \ - cum_dist->storage, \ - cum_dist->storageOffset+sample_idx*cum_dist->stride[0] \ - ) - new_val; - /* new sum of marginals is not one anymore... */ - sum = 1.0 - diff; - for (k=0; k<n_categories; k++) - { - new_val = THDoubleStorage_get( \ - cum_dist->storage, \ - cum_dist->storageOffset+k*cum_dist->stride[0] \ - ); - if (k >= sample_idx) - { - /* remove sampled probability mass from later cumulative probabilities */ - new_val -= diff; - } - /* make total marginals sum to one */ - new_val /= sum; - THDoubleStorage_set( \ - cum_dist->storage, \ - cum_dist->storageOffset+k*cum_dist->stride[0], \ - new_val \ - ); - } - } - } - } - - THDoubleTensor_free(cum_dist); - - if (start_dim == 1) - { - THLongTensor_resize1d(self, n_sample); - THTensor_(resize1d)(prob_dist, n_categories); - } -} - -#endif - -#if defined(TH_REAL_IS_BYTE) -void THTensor_(getRNGState)(THGenerator *_generator, THTensor *self) -{ - static const size_t size = sizeof(THGenerator); - THGenerator *rng_state; - THTensor_(resize1d)(self, size); - THArgCheck(THTensor_(nElement)(self) == size, 1, "RNG state is wrong size"); - THArgCheck(THTensor_(isContiguous)(self), 1, "RNG state needs to be contiguous"); - rng_state = (THGenerator *)THTensor_(data)(self); - THGenerator_copy(rng_state, _generator); -} - -void THTensor_(setRNGState)(THGenerator *_generator, THTensor *self) -{ - static const size_t size = sizeof(THGenerator); - THGenerator *rng_state; - THArgCheck(THTensor_(nElement)(self) == size, 1, "RNG state is wrong size"); - THArgCheck(THTensor_(isContiguous)(self), 1, "RNG state needs to be contiguous"); - rng_state = (THGenerator *)THTensor_(data)(self); - THArgCheck(THGenerator_isValid(rng_state), 1, "Invalid RNG state"); - THGenerator_copy(_generator, rng_state); -} -#endif - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THTensorRandom.h b/contrib/lua-torch/torch7/lib/TH/generic/THTensorRandom.h deleted file mode 100644 index d205142422..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THTensorRandom.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THTensorRandom.h" -#else - -TH_API void THTensor_(random)(THTensor *self, THGenerator *_generator); -TH_API void THTensor_(geometric)(THTensor *self, THGenerator *_generator, double p); -TH_API void THTensor_(bernoulli)(THTensor *self, THGenerator *_generator, double p); -TH_API void THTensor_(bernoulli_FloatTensor)(THTensor *self, THGenerator *_generator, THFloatTensor *p); -TH_API void THTensor_(bernoulli_DoubleTensor)(THTensor *self, THGenerator *_generator, THDoubleTensor *p); - -#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) -TH_API void THTensor_(uniform)(THTensor *self, THGenerator *_generator, double a, double b); -TH_API void THTensor_(normal)(THTensor *self, THGenerator *_generator, double mean, double stdv); -TH_API void THTensor_(exponential)(THTensor *self, THGenerator *_generator, double lambda); -TH_API void THTensor_(cauchy)(THTensor *self, THGenerator *_generator, double median, double sigma); -TH_API void THTensor_(logNormal)(THTensor *self, THGenerator *_generator, double mean, double stdv); -TH_API void 
THTensor_(multinomial)(THLongTensor *self, THGenerator *_generator, THTensor *prob_dist, int n_sample, int with_replacement); -#endif - -#if defined(TH_REAL_IS_BYTE) -TH_API void THTensor_(getRNGState)(THGenerator *_generator, THTensor *self); -TH_API void THTensor_(setRNGState)(THGenerator *_generator, THTensor *self); -#endif - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THVector.h b/contrib/lua-torch/torch7/lib/TH/generic/THVector.h deleted file mode 100644 index 7d368541a8..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THVector.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THVector.h" -#else - -TH_API void THVector_(fill)(real *x, const real c, const ptrdiff_t n); -TH_API void THVector_(cadd)(real *z, const real *x, const real *y, const real c, const ptrdiff_t n); -TH_API void THVector_(adds)(real *y, const real *x, const real c, const ptrdiff_t n); -TH_API void THVector_(cmul)(real *z, const real *x, const real *y, const ptrdiff_t n); -TH_API void THVector_(muls)(real *y, const real *x, const real c, const ptrdiff_t n); -TH_API void THVector_(cdiv)(real *z, const real *x, const real *y, const ptrdiff_t n); -TH_API void THVector_(divs)(real *y, const real *x, const real c, const ptrdiff_t n); -TH_API void THVector_(copy)(real *y, const real *x, const ptrdiff_t n); - -/* Initialize the dispatch pointers */ -TH_API void THVector_(vectorDispatchInit)(void); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/generic/THVectorDefault.c b/contrib/lua-torch/torch7/lib/TH/generic/THVectorDefault.c deleted file mode 100644 index 3388e0d9b6..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/THVectorDefault.c +++ /dev/null @@ -1,131 +0,0 @@ -#ifndef TH_GENERIC_FILE -#define TH_GENERIC_FILE "generic/THVectorDefault.c" -#else - -void THVector_(copy_DEFAULT)(real *x, const real *y, const ptrdiff_t n) { - ptrdiff_t i = 0; - - for(; i - -static __inline int __get_cpuid (unsigned int __level, unsigned int *__eax, - unsigned int *__ebx, unsigned int *__ecx, - unsigned int *__edx) { - unsigned int cpui[4]; - __cpuid(cpui, __level); - *__eax = cpui[0]; *__ebx = cpui[1]; *__ecx = cpui[2]; *__edx = cpui[3]; - return 1; -} - -static void xgetbv(unsigned int op, unsigned int* eax, unsigned int* edx) { - *eax = 0; *edx = 0; - if (op == 0) - *eax = _xgetbv(_XCR_XFEATURE_ENABLED_MASK); -} - -#else - -#if __i386__ -#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \ -__asm(" pushl %%ebx\n" \ -" cpuid\n" \ -" mov %%ebx,%1\n" \ -" popl %%ebx" \ -: "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \ -: "0"(__level)) -#else -#define __cpuid(__level, __eax, __ebx, __ecx, __edx) \ -__asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \ -: "0"(__level)) -#endif - -static __inline int __get_cpuid (unsigned int __level, unsigned int *__eax, - unsigned int *__ebx, unsigned int *__ecx, - unsigned int *__edx) { - __cpuid(__level, *__eax, *__ebx, *__ecx, *__edx); - return 1; -} - -static void xgetbv(unsigned int op, unsigned int* eax, unsigned int* edx) { - __asm__ __volatile__ - (".byte 0x0f, 0x01, 0xd0": "=a" (*eax), "=d" (*edx) : "c" (op) : "cc"); -} - -#endif - -enum ECPUFeature -{ - kCPUFeature_SSE = 0x01, - kCPUFeature_SSE2 = 0x02, - kCPUFeature_SSE3 = 0x04, - kCPUFeature_SSE3_S = 0x08, - kCPUFeature_SSE4_1 = 0x10, - kCPUFeature_SSE4_2 = 0x20, - kCPUFeature_AVX = 0x40 -}; - -static unsigned int checkCPUFeatures() { - unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; - unsigned int features = 0; - __get_cpuid(1, &eax, 
&ebx, &ecx, &edx); - if( (edx & (1 << 25)) != 0 ) { - features |= kCPUFeature_SSE; - } - if( (edx & (1 << 26)) != 0 ) { - features |= kCPUFeature_SSE2; - } - if( (ecx & (1 << 0)) != 0 ) { - features |= kCPUFeature_SSE3; - } - if( (ecx & (1 << 9)) != 0 ) { - features |= kCPUFeature_SSE3_S; - } - if( (ecx & (1 << 19)) != 0 ) { - features |= kCPUFeature_SSE4_1; - } - if( (ecx & (1 << 20)) != 0 ) { - features |= kCPUFeature_SSE4_2; - } - if( (ecx & (1 << 28)) != 0 && (ecx & (1 << 27)) != 0 && (ecx & (1 << 26)) != 0 ) { - xgetbv(0, &eax, &edx); - if( (eax & 6) == 6 ) { - features |= kCPUFeature_AVX; - } - } - return features; -} - -#include <stdio.h> - -static int haveCPUFeature(unsigned int feature) { - static unsigned int sCPUFeatures = 0; - static int sDetectedCPUFeatures = 0; - if (!sDetectedCPUFeatures) { - sDetectedCPUFeatures = 1; - sCPUFeatures = checkCPUFeatures(); - if ((sCPUFeatures & kCPUFeature_AVX) != 0) { - printf("torch running avx\n"); - } else { - printf("torch running sse \n"); - } - } - return (sCPUFeatures & feature) != 0; -} - -#endif - -void convolve_5x5_sse(float* output, float* input, float* kernel, long outRows, long outCols, long outStride, long inCols); -void convolve_5x5_avx(float* output, float* input, float* kernel, long outRows, long outCols, long outStride, long inCols); - -void convolve_5x5(float* output, float* input, float* kernel, long outRows, long outCols, long inCols) { -#if defined(USE_AVX) && defined(__AVX__) - int avx = haveCPUFeature(kCPUFeature_AVX); - if (avx) - { - convolve_5x5_avx(output, input, kernel, outRows, outCols, outCols, inCols); - } - else -#endif - { - convolve_5x5_sse(output, input, kernel, outRows, outCols, outCols, inCols); - } -} diff --git a/contrib/lua-torch/torch7/lib/TH/generic/simd/convolve.h b/contrib/lua-torch/torch7/lib/TH/generic/simd/convolve.h deleted file mode 100644 index 7b9b04c50c..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/simd/convolve.h +++ /dev/null @@ -1 +0,0 @@ -void convolve_5x5(float* output, float* input, float* kernel, long outRows, long outCols, long inCols); \ No newline at end of file diff --git a/contrib/lua-torch/torch7/lib/TH/generic/simd/convolve5x5_avx.c b/contrib/lua-torch/torch7/lib/TH/generic/simd/convolve5x5_avx.c deleted file mode 100644 index 52b6d0ffb5..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/simd/convolve5x5_avx.c +++ /dev/null @@ -1,212 +0,0 @@ -#include <immintrin.h> -#include "common_simd.h" - -#define CLEAR_AVX() _mm256_zeroupper() - -void convolve_5x5_1_avx(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount = count & 0xFFFFFFF8; - DECLARE_OUTPUT_1() - for (; i < alignedCount; i+=8) { - CONVOLVE_8COLS_XROWS(1, i) - } -} - -void convolve_5x5_2_avx(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount = count & 0xFFFFFFF8; - DECLARE_OUTPUT_2() - for (; i < alignedCount; i+=8) { - CONVOLVE_8COLS_XROWS(2, i) - } -} - -void convolve_5x5_4_avx(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount = count & 0xFFFFFFF8; - DECLARE_OUTPUT_4() - for (; i < alignedCount; i+=8) { - CONVOLVE_8COLS_XROWS(4, i) - } -} - -void convolve_5x5_5_avx(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount = count & 0xFFFFFFF8; - DECLARE_OUTPUT_5() - for (; i < alignedCount; i+=8) { - CONVOLVE_8COLS_XROWS(5, 
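[Editor's note: checkCPUFeatures above reads CPUID leaf 1 and, before reporting AVX, also confirms via xgetbv that the OS saves the YMM state (the XSAVE/OSXSAVE/AVX bit trio plus XCR0 bits 1-2); that is what lets convolve_5x5 pick an implementation at run time. With GCC or Clang the same check can be written against the compiler's <cpuid.h> helper; a sketch, assuming an x86-64 target.]

    #include <cpuid.h>
    #include <stdio.h>

    static int os_supports_avx(void)
    {
        unsigned int eax, ebx, ecx, edx;
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 0;
        /* bit 28: AVX, bit 27: OSXSAVE, bit 26: XSAVE -- same trio as above */
        if ((ecx & (1u << 28)) && (ecx & (1u << 27)) && (ecx & (1u << 26))) {
            unsigned int lo, hi;
            /* xgetbv(0): OS must save XMM (bit 1) and YMM (bit 2) state */
            __asm__ __volatile__(".byte 0x0f, 0x01, 0xd0"
                                 : "=a"(lo), "=d"(hi) : "c"(0));
            return (lo & 6) == 6;
        }
        return 0;
    }

    int main(void)
    {
        printf(os_supports_avx() ? "avx\n" : "no avx\n");
        return 0;
    }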
i) - } -} - -void convolve_5x5_6_avx(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount = count & 0xFFFFFFF8; - DECLARE_OUTPUT_6() - for (; i < alignedCount; i+=8) { - CONVOLVE_8COLS_XROWS(6, i) - } -} - -void convolve_5x5_7_avx(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount = count & 0xFFFFFFF8; - DECLARE_OUTPUT_7() - for (; i < alignedCount; i+=8) { - CONVOLVE_8COLS_XROWS(7, i) - } -} - -void convolve_5x5_8_avx(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount = count & 0xFFFFFFF8; - DECLARE_OUTPUT_8() - for (; i < alignedCount; i+=8) { - CONVOLVE_8COLS_XROWS(8, i) - } -} - -void convolve_5x5_64x64_avx(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - for(int i = 0; i < 60; i+=6) - { - DECLARE_OUTPUT_6() - CONVOLVE_8COLS_XROWS(6, 0) - CONVOLVE_8COLS_XROWS(6, 8) - CONVOLVE_8COLS_XROWS(6, 16) - CONVOLVE_8COLS_XROWS(6, 24) - CONVOLVE_8COLS_XROWS(6, 32) - CONVOLVE_8COLS_XROWS(6, 40) - CONVOLVE_8COLS_XROWS(6, 48) - CONVOLVE_8COLS_XROWS(6, 56) - output += outputStride * 6; - image += inputStride * 6; - } - DECLARE_OUTPUT_4() - CONVOLVE_8COLS_XROWS(4, 0) - CONVOLVE_8COLS_XROWS(4, 8) - CONVOLVE_8COLS_XROWS(4, 16) - CONVOLVE_8COLS_XROWS(4, 24) - CONVOLVE_8COLS_XROWS(4, 32) - CONVOLVE_8COLS_XROWS(4, 40) - CONVOLVE_8COLS_XROWS(4, 48) - CONVOLVE_8COLS_XROWS(4, 56) -} - -void convolve_5x5_32x32_avx(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - for(int i = 0; i < 30; i+=6) - { - DECLARE_OUTPUT_6() - CONVOLVE_8COLS_XROWS(6, 0) - CONVOLVE_8COLS_XROWS(6, 8) - CONVOLVE_8COLS_XROWS(6, 16) - CONVOLVE_8COLS_XROWS(6, 24) - output += outputStride * 6; - image += inputStride * 6; - } - DECLARE_OUTPUT_2() - CONVOLVE_8COLS_XROWS(2, 0) - CONVOLVE_8COLS_XROWS(2, 8) - CONVOLVE_8COLS_XROWS(2, 16) - CONVOLVE_8COLS_XROWS(2, 24) -} - -void convolve_5x5_16x16_avx(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - for(int i = 0; i < 12; i+=6) - { - DECLARE_OUTPUT_6() - CONVOLVE_8COLS_XROWS(6, 0) - CONVOLVE_8COLS_XROWS(6, 8) - output += outputStride * 6; - image += inputStride * 6; - } - DECLARE_OUTPUT_4() - CONVOLVE_8COLS_XROWS(4, 0) - CONVOLVE_8COLS_XROWS(4, 8) -} - -void convolve_5x5_8x8_avx(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - DECLARE_OUTPUT_8() - CONVOLVE_8COLS_XROWS(8, 0) -} - -void convolve_5x5_sse(float* output, float* input, float* kernel, long outRows, long outCols, long outStride, long inCols); - -void convolve_5x5_avx(float* output, float* input, float* kernel, long outRows, long outCols, long outStride, long inCols) { - long ic = inCols; - long yy = 0; - float* t_ = input; - float* r_ = output; - float* k_ = kernel; - - if((outRows == 64) && (outCols == 64)) { - convolve_5x5_64x64_avx(output, input, kernel, outRows, outStride, inCols); - return; - } - - if((outRows == 32) && (outCols == 32)) { - convolve_5x5_32x32_avx(output, input, kernel, outRows, outStride, inCols); - return; - } - - if((outRows == 16) && (outCols == 16)) { - convolve_5x5_16x16_avx(output, input, kernel, outRows, outStride, inCols); - return; - } - - if((outRows == 8) && (outCols == 8)) { - convolve_5x5_8x8_avx(output, input, kernel, outRows, outStride, inCols); - return; - } - - for(; yy < (outRows / 6 ) 
* 6; yy += 6) { - float *pi_ = t_ + yy*ic; - float *pw_ = k_; - float *pis_ = pi_; - convolve_5x5_6_avx(r_, pis_, pw_, outCols, outStride, ic); - r_ += (outStride * 6); - } - - // more than 2 rows left to process and we ended up on a non-multiple of 4 - if((yy < (outRows & 0xFFFFFFFE)) && ((yy % 4) != 0)) { - // process 2 rows to align on the next multiple of 4 rows (because we were a multiple of 6 after the previous loop) - float *pi_ = t_ + yy*ic; - float *pw_ = k_; - float *pis_ = pi_; - convolve_5x5_2_avx(r_, pis_, pw_, outCols, outStride, ic); - r_ += (outStride * 2); - yy += 2; - } - - for(; yy < (outRows & 0xFFFFFFFC); yy += 4) { - float *pi_ = t_ + yy*ic; - float *pw_ = k_; - float *pis_ = pi_; - convolve_5x5_4_avx(r_, pis_, pw_, outCols, outStride, ic); - r_ += (outStride * 4); - } - - for(; yy < (outRows & 0xFFFFFFFE); yy += 2) { - float *pi_ = t_ + yy*ic; - float *pw_ = k_; - float *pis_ = pi_; - convolve_5x5_2_avx(r_, pis_, pw_, outCols, outStride, ic); - r_ += (outStride * 2); - } - - for(; yy < outRows; yy += 1) { - float *pi_ = t_ + yy*ic; - float *pw_ = k_; - float *pis_ = pi_; - convolve_5x5_1_avx(r_, pis_, pw_, outCols, outStride, ic); - r_ += (outStride * 1); - } - - long procCols = outCols & 0xFFFFFFF8; // avx version processes 8 cols at a time - long remCols = outCols - procCols; - - //process the rest using sse - if( remCols > 0) { - CLEAR_AVX(); - convolve_5x5_sse(&output[procCols], &input[procCols], kernel, outRows, remCols, outStride, inCols); - } -} \ No newline at end of file diff --git a/contrib/lua-torch/torch7/lib/TH/generic/simd/convolve5x5_sse.c b/contrib/lua-torch/torch7/lib/TH/generic/simd/convolve5x5_sse.c deleted file mode 100644 index f34b796957..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/simd/convolve5x5_sse.c +++ /dev/null @@ -1,320 +0,0 @@ -#include -#include "common_simd.h" - - -/* SSE variants */ -void convolve_5x5_1_sse(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount4 = count & 0xFFFFFFFC; - DECLARE_OUTPUT_1() - for (; i < alignedCount4; i+=4) { - CONVOLVE_4COLS_XROWS(1, i) - } - for (; i < (count); i++) { - float output0 = output[i + outputStride * 0]; - int row; - for (row = 0; row < 5; row++) { - int col; - for (col = 0; col < 5; col++) { - output0 += weight[5 * row + col] * image[i + (row + 0) * inputStride + col]; - } - } - output[i + outputStride * 0] = output0; - } -} - -void convolve_5x5_2_sse(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount4 = count & 0xFFFFFFFC; - DECLARE_OUTPUT_2() - for (; i < alignedCount4; i+=4) { - CONVOLVE_4COLS_XROWS(2, i) - } - for (; i < (count); i++) { - float output0 = output[i + outputStride * 0]; - float output1 = output[i + outputStride * 1]; - int row; - for (row = 0; row < 5; row++) { - int col; - for (col = 0; col < 5; col++) { - output0 += weight[5 * row + col] * image[i + (row + 0) * inputStride + col]; - output1 += weight[5 * row + col] * image[i + (row + 1) * inputStride + col]; - } - } - output[i + outputStride * 0] = output0; - output[i + outputStride * 1] = output1; - } -} - -void convolve_5x5_4_sse(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount4 = count & 0xFFFFFFFC; - DECLARE_OUTPUT_4() - for (; i < alignedCount4; i+=4) { - CONVOLVE_4COLS_XROWS(4, i) - } - for (; i < (count); i++) { - float output0 = output[i + outputStride * 0]; - float 
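[Editor's note: convolve_5x5_avx above and convolve_5x5_sse below share one row-blocking driver: blocks of 6 rows while possible, one block of 2 to land back on a multiple of 4, then blocks of 4, 2, and single rows. The control flow in isolation; blocked_rows and show are hypothetical stand-ins for the per-block kernels.]

    #include <stdio.h>

    /* Process `rows` in blocks of 6/2/4/2/1 exactly like the drivers above:
       after the 6-row loop, one 2-row block realigns to a multiple of 4. */
    static void blocked_rows(long rows, void (*kernel)(long start, long nrows))
    {
        long yy = 0;
        for (; yy < (rows / 6) * 6; yy += 6) kernel(yy, 6);
        if ((yy < (rows & ~1L)) && ((yy % 4) != 0)) { kernel(yy, 2); yy += 2; }
        for (; yy < (rows & ~3L); yy += 4) kernel(yy, 4);
        for (; yy < (rows & ~1L); yy += 2) kernel(yy, 2);
        for (; yy < rows; yy += 1) kernel(yy, 1);
    }

    static void show(long start, long nrows)
    {
        printf("rows %ld..%ld as a block of %ld\n", start, start + nrows - 1, nrows);
    }

    int main(void)
    {
        blocked_rows(9, show);   /* one 6-row block, then 2, then 1 */
        return 0;
    }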
output1 = output[i + outputStride * 1]; - float output2 = output[i + outputStride * 2]; - float output3 = output[i + outputStride * 3]; - int row; - for (row = 0; row < 5; row++) { - int col; - for (col = 0; col < 5; col++) { - output0 += weight[5 * row + col] * image[i + (row + 0) * inputStride + col]; - output1 += weight[5 * row + col] * image[i + (row + 1) * inputStride + col]; - output2 += weight[5 * row + col] * image[i + (row + 2) * inputStride + col]; - output3 += weight[5 * row + col] * image[i + (row + 3) * inputStride + col]; - } - } - output[i + outputStride * 0] = output0; - output[i + outputStride * 1] = output1; - output[i + outputStride * 2] = output2; - output[i + outputStride * 3] = output3; - } -} - -void convolve_5x5_6_sse(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount4 = count & 0xFFFFFFFC; - DECLARE_OUTPUT_6() - for (; i < alignedCount4; i+=4) { - CONVOLVE_4COLS_XROWS(6, i) - } - for (; i<(count); i++) { - float output0 = output[i + outputStride * 0]; - float output1 = output[i + outputStride * 1]; - float output2 = output[i + outputStride * 2]; - float output3 = output[i + outputStride * 3]; - float output4 = output[i + outputStride * 4]; - float output5 = output[i + outputStride * 5]; - int row; - for (row = 0; row < 5; row++) { - int col; - for (col = 0; col < 5; col++) { - output0 += weight[5 * row + col] * image[i + (row + 0) * inputStride + col]; - output1 += weight[5 * row + col] * image[i + (row + 1) * inputStride + col]; - output2 += weight[5 * row + col] * image[i + (row + 2) * inputStride + col]; - output3 += weight[5 * row + col] * image[i + (row + 3) * inputStride + col]; - output4 += weight[5 * row + col] * image[i + (row + 4) * inputStride + col]; - output5 += weight[5 * row + col] * image[i + (row + 5) * inputStride + col]; - } - } - output[i + outputStride * 0] = output0; - output[i + outputStride * 1] = output1; - output[i + outputStride * 2] = output2; - output[i + outputStride * 3] = output3; - output[i + outputStride * 4] = output4; - output[i + outputStride * 5] = output5; - } -} - -void convolve_5x5_8_sse(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - long i = 0; - long alignedCount4 = count & 0xFFFFFFFC; - DECLARE_OUTPUT_8() - for (; i < alignedCount4; i+=4) { - CONVOLVE_4COLS_XROWS(8, i) - } - for (; i<(count); i++) { - float output0 = output[i + outputStride * 0]; - float output1 = output[i + outputStride * 1]; - float output2 = output[i + outputStride * 2]; - float output3 = output[i + outputStride * 3]; - float output4 = output[i + outputStride * 4]; - float output5 = output[i + outputStride * 5]; - float output6 = output[i + outputStride * 6]; - float output7 = output[i + outputStride * 7]; - int row; - for (row = 0; row < 5; row++) { - int col; - for (col = 0; col < 5; col++) { - output0 += weight[5 * row + col] * image[i + (row + 0) * inputStride + col]; - output1 += weight[5 * row + col] * image[i + (row + 1) * inputStride + col]; - output2 += weight[5 * row + col] * image[i + (row + 2) * inputStride + col]; - output3 += weight[5 * row + col] * image[i + (row + 3) * inputStride + col]; - output4 += weight[5 * row + col] * image[i + (row + 4) * inputStride + col]; - output5 += weight[5 * row + col] * image[i + (row + 5) * inputStride + col]; - output6 += weight[5 * row + col] * image[i + (row + 6) * inputStride + col]; - output7 += weight[5 * row + col] * image[i + (row + 7) * inputStride + col]; - } - } - 
output[i + outputStride * 0] = output0; - output[i + outputStride * 1] = output1; - output[i + outputStride * 2] = output2; - output[i + outputStride * 3] = output3; - output[i + outputStride * 4] = output4; - output[i + outputStride * 5] = output5; - output[i + outputStride * 6] = output6; - output[i + outputStride * 7] = output7; - } -} - -#define UNROLL_SSE_CONVOLUTION 0 -#if (UNROLL_SSE_CONVOLUTION) - -void convolve_5x5_64x64_sse(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - for(int i = 0; i < 60; i+=6) - { - DECLARE_OUTPUT_6() - CONVOLVE_4COLS_XROWS(6, 0) - CONVOLVE_4COLS_XROWS(6, 4) - CONVOLVE_4COLS_XROWS(6, 8) - CONVOLVE_4COLS_XROWS(6, 12) - CONVOLVE_4COLS_XROWS(6, 16) - CONVOLVE_4COLS_XROWS(6, 20) - CONVOLVE_4COLS_XROWS(6, 24) - CONVOLVE_4COLS_XROWS(6, 28) - CONVOLVE_4COLS_XROWS(6, 32) - CONVOLVE_4COLS_XROWS(6, 36) - CONVOLVE_4COLS_XROWS(6, 40) - CONVOLVE_4COLS_XROWS(6, 44) - CONVOLVE_4COLS_XROWS(6, 48) - CONVOLVE_4COLS_XROWS(6, 52) - CONVOLVE_4COLS_XROWS(6, 56) - CONVOLVE_4COLS_XROWS(6, 60) - output += outputStride * 6; - image += inputStride * 6; - } - DECLARE_OUTPUT_4() - CONVOLVE_4COLS_XROWS(4, 0) - CONVOLVE_4COLS_XROWS(4, 4) - CONVOLVE_4COLS_XROWS(4, 8) - CONVOLVE_4COLS_XROWS(4, 12) - CONVOLVE_4COLS_XROWS(4, 16) - CONVOLVE_4COLS_XROWS(4, 20) - CONVOLVE_4COLS_XROWS(4, 24) - CONVOLVE_4COLS_XROWS(4, 28) - CONVOLVE_4COLS_XROWS(4, 32) - CONVOLVE_4COLS_XROWS(4, 36) - CONVOLVE_4COLS_XROWS(4, 40) - CONVOLVE_4COLS_XROWS(4, 44) - CONVOLVE_4COLS_XROWS(4, 48) - CONVOLVE_4COLS_XROWS(4, 52) - CONVOLVE_4COLS_XROWS(4, 56) - CONVOLVE_4COLS_XROWS(4, 60) -} - -void convolve_5x5_32x32_sse(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - for(int i = 0; i < 30; i+=6) - { - DECLARE_OUTPUT_6() - - CONVOLVE_4COLS_XROWS(6, 0) - CONVOLVE_4COLS_XROWS(6, 4) - CONVOLVE_4COLS_XROWS(6, 8) - CONVOLVE_4COLS_XROWS(6, 12) - CONVOLVE_4COLS_XROWS(6, 16) - CONVOLVE_4COLS_XROWS(6, 20) - CONVOLVE_4COLS_XROWS(6, 24) - CONVOLVE_4COLS_XROWS(6, 28) - - output += outputStride * 6; - image += inputStride * 6; - } - DECLARE_OUTPUT_2() - CONVOLVE_4COLS_XROWS(2, 0) - CONVOLVE_4COLS_XROWS(2, 4) - CONVOLVE_4COLS_XROWS(2, 8) - CONVOLVE_4COLS_XROWS(2, 12) - CONVOLVE_4COLS_XROWS(2, 16) - CONVOLVE_4COLS_XROWS(2, 20) - CONVOLVE_4COLS_XROWS(2, 24) - CONVOLVE_4COLS_XROWS(2, 28) -} - -void convolve_5x5_16x16_sse(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - for(int i = 0; i < 12; i+=6) - { - DECLARE_OUTPUT_6() - CONVOLVE_4COLS_XROWS(6, 0) - CONVOLVE_4COLS_XROWS(6, 4) - CONVOLVE_4COLS_XROWS(6, 8) - CONVOLVE_4COLS_XROWS(6, 12) - output += outputStride * 6; - image += inputStride * 6; - } - DECLARE_OUTPUT_4() - CONVOLVE_4COLS_XROWS(4, 0) - CONVOLVE_4COLS_XROWS(4, 4) - CONVOLVE_4COLS_XROWS(4, 8) - CONVOLVE_4COLS_XROWS(4, 12) -} - -void convolve_5x5_8x8_sse(float* output, float* image, float* weight, long count, long outputStride, long inputStride) { - DECLARE_OUTPUT_8() - CONVOLVE_4COLS_XROWS(8, 0) - CONVOLVE_4COLS_XROWS(8, 4) -} - -#endif - -void convolve_5x5_sse(float* output, float* input, float* kernel, long outRows, long outCols, long outStride, long inCols) { - long yy = 0; - float* t_ = input; - float* r_ = output; - float* k_ = kernel; -#if (UNROLL_SSE_CONVOLUTION) - if((outRows == 64) && (outCols == 64)) { - convolve_5x5_64x64_sse(output, input, kernel, outRows, outStride, inCols); - return; - } - - if((outRows == 32) && (outCols == 32)) { - convolve_5x5_32x32_sse(output, input, 
kernel, outRows, outStride, inCols); - return; - } - - if((outRows == 16) && (outCols == 16)) { - convolve_5x5_16x16_sse(output, input, kernel, outRows, outStride, inCols); - return; - } - - if((outRows == 8) && (outCols == 8)) { - convolve_5x5_8x8_sse(output, input, kernel, outRows, outStride, inCols); - return; - } -#endif - for(; yy < (outRows / 6 ) * 6; yy += 6) { - float *pi_ = t_ + yy*inCols; - float *pw_ = k_; - float *pis_ = pi_; - convolve_5x5_6_sse(r_, pis_, pw_, outCols, outStride, inCols); - r_ += (outStride * 6); - } - // more than 2 rows left to process and we ended up on a non-multiple of 4 - if((yy < (outRows & 0xFFFFFFFE)) && ((yy % 4) != 0)) { - // process 2 rows to align on the next multiple of 4 rows (because we were a multiple of 6 after the previous loop) - float *pi_ = t_ + yy*inCols; - float *pw_ = k_; - float *pis_ = pi_; - convolve_5x5_2_sse(r_, pis_, pw_, outCols, outStride, inCols); - r_ += (outStride * 2); - yy += 2; - } - - for(; yy < (outRows & 0xFFFFFFFC); yy += 4) { - float *pi_ = t_ + yy*inCols; - float *pw_ = k_; - float *pis_ = pi_; - convolve_5x5_4_sse(r_, pis_, pw_, outCols, outStride, inCols); - r_ += (outStride * 4); - } - - for(; yy < (outRows & 0xFFFFFFFE); yy += 2) { - float *pi_ = t_ + yy*inCols; - float *pw_ = k_; - float *pis_ = pi_; - convolve_5x5_2_sse(r_, pis_, pw_, outCols, outStride, inCols); - r_ += (outStride * 2); - } - - for(; yy < outRows; yy += 1) { - float *pi_ = t_ + yy*inCols; - float *pw_ = k_; - float *pis_ = pi_; - convolve_5x5_1_sse(r_, pis_, pw_, outCols, outStride, inCols); - r_ += (outStride * 1); - } -} diff --git a/contrib/lua-torch/torch7/lib/TH/generic/simd/simd.h b/contrib/lua-torch/torch7/lib/TH/generic/simd/simd.h deleted file mode 100644 index b1878ad5be..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/generic/simd/simd.h +++ /dev/null @@ -1,165 +0,0 @@ -#ifndef TH_SIMD_INC -#define TH_SIMD_INC - -#include <stdint.h> -#include <stdlib.h> -#if defined(_MSC_VER) -#include <intrin.h> -#elif defined(HAVE_GCC_GET_CPUID) && defined(USE_GCC_GET_CPUID) -#include <cpuid.h> -#endif - -// Can be found on Intel ISA Reference for CPUID -#define CPUID_AVX2_BIT 0x20 // Bit 5 of EBX for EAX=0x7 -#define CPUID_AVX_BIT 0x10000000 // Bit 28 of ECX for EAX=0x1 -#define CPUID_SSE_BIT 0x2000000 // bit 25 of EDX for EAX=0x1 - -// Helper macros for initialization -#define FUNCTION_IMPL(NAME, EXT) \ - { .function=(void *)NAME, \ - .supportedSimdExt=EXT \ - } - -#define INIT_DISPATCH_PTR(OP) \ - do { \ - int i; \ - for (i = 0; i < sizeof(THVector_(OP ## _DISPATCHTABLE)) / sizeof(FunctionDescription); ++i) { \ - THVector_(OP ## _DISPATCHPTR) = THVector_(OP ## _DISPATCHTABLE)[i].function; \ - if (THVector_(OP ## _DISPATCHTABLE)[i].supportedSimdExt & hostSimdExts) { \ - break; \ - } \ - } \ - } while(0) - - -typedef struct FunctionDescription -{ - void *function; - uint32_t supportedSimdExt; -} FunctionDescription; - - -enum SIMDExtensions -{ -#if defined(__NEON__) - SIMDExtension_NEON = 0x1, -#elif defined(__PPC64__) - SIMDExtension_VSX = 0x1, -#else - SIMDExtension_AVX2 = 0x1, - SIMDExtension_AVX = 0x2, - SIMDExtension_SSE = 0x4, -#endif - SIMDExtension_DEFAULT = 0x0 -}; - - -#if defined(__arm__) || defined(__aarch64__) // incl. 
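[Editor's note: INIT_DISPATCH_PTR above walks a FunctionDescription table in best-first order, provisionally taking each entry and stopping at the first one whose SIMD requirement matches the detected host extensions; if nothing matches, the last (scalar) entry is left in place. The pattern in miniature; a standalone sketch with hypothetical names.]

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    enum { EXT_AVX = 0x2, EXT_SSE = 0x4, EXT_DEFAULT = 0x0 };

    typedef struct { void (*fn)(void); uint32_t required; } FnDesc;

    static void add_avx(void)     { printf("avx add\n"); }
    static void add_sse(void)     { printf("sse add\n"); }
    static void add_default(void) { printf("scalar add\n"); }

    /* Best-first table; the DEFAULT entry never matches the AND test,
       so it wins only by being the final provisional assignment. */
    static const FnDesc add_table[] = {
        { add_avx, EXT_AVX },
        { add_sse, EXT_SSE },
        { add_default, EXT_DEFAULT },
    };

    static void (*add_ptr)(void);

    static void dispatch_init(uint32_t host_exts)
    {
        for (size_t i = 0; i < sizeof(add_table) / sizeof(add_table[0]); i++) {
            add_ptr = add_table[i].fn;           /* provisional pick */
            if (add_table[i].required & host_exts)
                break;                           /* keep first supported */
        }
    }

    int main(void)
    {
        dispatch_init(EXT_SSE);   /* pretend the host only has SSE */
        add_ptr();                /* -> "sse add" */
        return 0;
    }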
armel, armhf, arm64 - - #if defined(__NEON__) - -static inline uint32_t detectHostSIMDExtensions() -{ - return SIMDExtension_NEON; -} - - #else //ARM without NEON - -static inline uint32_t detectHostSIMDExtensions() -{ - return SIMDExtension_DEFAULT; -} - - #endif - -#elif defined(__PPC64__) - - #if defined(__VSX__) - -static inline uint32_t detectHostSIMDExtensions() -{ - uint32_t hostSimdExts = SIMDExtension_DEFAULT; - char *evar; - - evar = getenv("TH_NO_VSX"); - if (evar == NULL || strncmp(evar, "1", 2) != 0) - hostSimdExts = SIMDExtension_VSX; - return hostSimdExts; -} - - #else //PPC64 without VSX - -static inline uint32_t detectHostSIMDExtensions() -{ - return SIMDExtension_DEFAULT; -} - - #endif - -#else // x86 -static inline void cpuid(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) -{ -#if defined(_MSC_VER) - uint32_t cpuInfo[4]; - __cpuid(cpuInfo, *eax); - *eax = cpuInfo[0]; - *ebx = cpuInfo[1]; - *ecx = cpuInfo[2]; - *edx = cpuInfo[3]; -#elif defined(HAVE_GCC_GET_CPUID) && defined(USE_GCC_GET_CPUID) - uint32_t level = *eax; - __get_cpuid (level, eax, ebx, ecx, edx); -#else - uint32_t a = *eax, b, c = *ecx, d; - __asm volatile ( "cpuid\n\t" - : "+a"(a), "=b"(b), "+c"(c), "=d"(d) ); - *eax = a; - *ebx = b; - *ecx = c; - *edx = d; -#endif -} - -static inline uint32_t detectHostSIMDExtensions() -{ - uint32_t eax, ebx, ecx, edx; - uint32_t hostSimdExts = 0x0; - int TH_NO_AVX = 1, TH_NO_AVX2 = 1, TH_NO_SSE = 1; - char *evar; - - evar = getenv("TH_NO_AVX2"); - if (evar == NULL || strncmp(evar, "1", 2) != 0) - TH_NO_AVX2 = 0; - - // Check for AVX2. Requires separate CPUID - eax = 0x7; - ecx = 0x0; - cpuid(&eax, &ebx, &ecx, &edx); - if ((ebx & CPUID_AVX2_BIT) && TH_NO_AVX2 == 0) { - hostSimdExts |= SIMDExtension_AVX2; - } - - // Detect and enable AVX and SSE - eax = 0x1; - cpuid(&eax, &ebx, &ecx, &edx); - - evar = getenv("TH_NO_AVX"); - if (evar == NULL || strncmp(evar, "1", 2) != 0) - TH_NO_AVX = 0; - if (ecx & CPUID_AVX_BIT && TH_NO_AVX == 0) { - hostSimdExts |= SIMDExtension_AVX; - } - - evar = getenv("TH_NO_SSE"); - if (evar == NULL || strncmp(evar, "1", 2) != 0) - TH_NO_SSE = 0; - if (edx & CPUID_SSE_BIT && TH_NO_SSE == 0) { - hostSimdExts |= SIMDExtension_SSE; - } - - return hostSimdExts; -} - -#endif // end SIMD extension detection code - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/vector/AVX.c b/contrib/lua-torch/torch7/lib/TH/vector/AVX.c deleted file mode 100644 index 58c4e6d358..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/vector/AVX.c +++ /dev/null @@ -1,274 +0,0 @@ -#if defined(USE_AVX) && defined(__AVX__) -#ifndef _MSC_VER -#include <x86intrin.h> -#else -#include <intrin.h> -#endif - -#include "AVX.h" - -void THDoubleVector_copy_AVX(double *y, const double *x, const ptrdiff_t n) { - ptrdiff_t i; - ptrdiff_t off; - for (i=0; i<=((n)-8); i+=8) { - _mm256_storeu_pd(y+i, _mm256_loadu_pd(x+i)); - _mm256_storeu_pd(y+i+4, _mm256_loadu_pd(x+i+4)); - } - off = (n) - ((n)%8); - for (i=0; i<((n)%8); i++) { - y[off+i] = x[off+i]; - } -} - -void THDoubleVector_fill_AVX(double *x, const double c, const ptrdiff_t n) { - ptrdiff_t i; - ptrdiff_t off; - __m256d YMM0 = _mm256_set_pd(c, c, c, c); - for (i=0; i<=((n)-16); i+=16) { - _mm256_storeu_pd((x)+i , YMM0); - _mm256_storeu_pd((x)+i+4, YMM0); - _mm256_storeu_pd((x)+i+8, YMM0); - _mm256_storeu_pd((x)+i+12, YMM0); - } - off = (n) - ((n)%16); - for (i=0; i<((n)%16); i++) { - x[off+i] = c; - } -} - -void THDoubleVector_cdiv_AVX(double *z, const double *x, const double *y, const ptrdiff_t n) { - ptrdiff_t i; - __m256d YMM0, YMM1, YMM2, YMM3; 
- for (i=0; i<=((n)-8); i+=8) { - YMM0 = _mm256_loadu_pd(x+i); - YMM1 = _mm256_loadu_pd(x+i+4); - YMM2 = _mm256_loadu_pd(y+i); - YMM3 = _mm256_loadu_pd(y+i+4); - YMM2 = _mm256_div_pd(YMM0, YMM2); - YMM3 = _mm256_div_pd(YMM1, YMM3); - _mm256_storeu_pd(z+i, YMM2); - _mm256_storeu_pd(z+i+4, YMM3); - } - for (; i<(n); i++) { - z[i] = x[i] / y[i]; - } -} - -void THDoubleVector_divs_AVX(double *y, const double *x, const double c, const ptrdiff_t n) { - ptrdiff_t i; - __m256d YMM15 = _mm256_set_pd(c, c, c, c); - __m256d YMM0, YMM1; - for (i=0; i<=((n)-8); i+=8) { - YMM0 = _mm256_loadu_pd(x+i); - YMM1 = _mm256_loadu_pd(x+i+4); - YMM0 = _mm256_div_pd(YMM0, YMM15); - YMM1 = _mm256_div_pd(YMM1, YMM15); - _mm256_storeu_pd(y+i, YMM0); - _mm256_storeu_pd(y+i+4, YMM1); - } - for (; i<(n); i++) { - y[i] = x[i] / c; - } -} - -void THDoubleVector_cmul_AVX(double *z, const double *x, const double *y, const ptrdiff_t n) { - ptrdiff_t i; - __m256d YMM0, YMM1, YMM2, YMM3; - for (i=0; i<=((n)-8); i+=8) { - YMM0 = _mm256_loadu_pd(x+i); - YMM1 = _mm256_loadu_pd(x+i+4); - YMM2 = _mm256_loadu_pd(y+i); - YMM3 = _mm256_loadu_pd(y+i+4); - YMM2 = _mm256_mul_pd(YMM0, YMM2); - YMM3 = _mm256_mul_pd(YMM1, YMM3); - _mm256_storeu_pd(z+i, YMM2); - _mm256_storeu_pd(z+i+4, YMM3); - } - for (; i - -void THDoubleVector_copy_AVX(double *y, const double *x, const ptrdiff_t n); -void THDoubleVector_fill_AVX(double *x, const double c, const ptrdiff_t n); -void THDoubleVector_cdiv_AVX(double *z, const double *x, const double *y, const ptrdiff_t n); -void THDoubleVector_divs_AVX(double *y, const double *x, const double c, const ptrdiff_t n); -void THDoubleVector_cmul_AVX(double *z, const double *x, const double *y, const ptrdiff_t n); -void THDoubleVector_muls_AVX(double *y, const double *x, const double c, const ptrdiff_t n); -void THDoubleVector_cadd_AVX(double *z, const double *x, const double *y, const double c, const ptrdiff_t n); -void THDoubleVector_adds_AVX(double *y, const double *x, const double c, const ptrdiff_t n); -void THFloatVector_copy_AVX(float *y, const float *x, const ptrdiff_t n); -void THFloatVector_fill_AVX(float *x, const float c, const ptrdiff_t n); -void THFloatVector_cdiv_AVX(float *z, const float *x, const float *y, const ptrdiff_t n); -void THFloatVector_divs_AVX(float *y, const float *x, const float c, const ptrdiff_t n); -void THFloatVector_cmul_AVX(float *z, const float *x, const float *y, const ptrdiff_t n); -void THFloatVector_muls_AVX(float *y, const float *x, const float c, const ptrdiff_t n); -void THFloatVector_cadd_AVX(float *z, const float *x, const float *y, const float c, const ptrdiff_t n); -void THFloatVector_adds_AVX(float *y, const float *x, const float c, const ptrdiff_t n); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/vector/AVX2.c b/contrib/lua-torch/torch7/lib/TH/vector/AVX2.c deleted file mode 100644 index 082a680eab..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/vector/AVX2.c +++ /dev/null @@ -1,47 +0,0 @@ -#if defined(__AVX2__) -#ifndef _MSC_VER -#include -#else -#include -#endif -#include "AVX2.h" - -void THDoubleVector_cadd_AVX2(double *z, const double *x, const double *y, const double c, const ptrdiff_t n) { - ptrdiff_t i; - __m256d YMM15 = _mm256_set_pd(c, c, c, c); - __m256d YMM0, YMM1, YMM2, YMM3; - for (i=0; i<=((n)-8); i+=8) { - YMM0 = _mm256_loadu_pd(y+i); - YMM1 = _mm256_loadu_pd(y+i+4); - YMM2 = _mm256_loadu_pd(x+i); - YMM3 = _mm256_loadu_pd(x+i+4); - YMM2 = _mm256_fmadd_pd(YMM0, YMM15, YMM2); - YMM3 = _mm256_fmadd_pd(YMM1, YMM15, YMM3); - _mm256_storeu_pd(z+i, 
YMM2); - _mm256_storeu_pd(z+i+4, YMM3); - } - for (; i<(n); i++) { - z[i] = x[i] + y[i] * c; - } -} - -void THFloatVector_cadd_AVX2(float *z, const float *x, const float *y, const float c, const ptrdiff_t n) { - ptrdiff_t i; - __m256 YMM15 = _mm256_set_ps(c, c, c, c, c, c, c, c); - __m256 YMM0, YMM1, YMM2, YMM3; - for (i=0; i<=((n)-16); i+=16) { - YMM0 = _mm256_loadu_ps(y+i); - YMM1 = _mm256_loadu_ps(y+i+8); - YMM2 = _mm256_loadu_ps(x+i); - YMM3 = _mm256_loadu_ps(x+i+8); - YMM2 = _mm256_fmadd_ps(YMM0, YMM15, YMM2); - YMM3 = _mm256_fmadd_ps(YMM1, YMM15, YMM3); - _mm256_storeu_ps(z+i, YMM2); - _mm256_storeu_ps(z+i+8, YMM3); - } - for (; i<(n); i++) { - z[i] = x[i] + y[i] * c; - } -} - -#endif // defined(__AVX2__) diff --git a/contrib/lua-torch/torch7/lib/TH/vector/AVX2.h b/contrib/lua-torch/torch7/lib/TH/vector/AVX2.h deleted file mode 100644 index 85a9e93ee1..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/vector/AVX2.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef TH_AVX2_H -#define TH_AVX2_H - -#include - -void THDoubleVector_cadd_AVX2(double *z, const double *x, const double *y, const double c, const ptrdiff_t n); -void THFloatVector_cadd_AVX2(float *z, const float *x, const float *y, const float c, const ptrdiff_t n); - -#endif diff --git a/contrib/lua-torch/torch7/lib/TH/vector/NEON.c b/contrib/lua-torch/torch7/lib/TH/vector/NEON.c deleted file mode 100644 index 7920fb13b1..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/vector/NEON.c +++ /dev/null @@ -1,105 +0,0 @@ -static void THFloatVector_fill_NEON(float *x, const float c, const ptrdiff_t n) { - long i = 0; - - for(; i < n-4; i += 4) - { - x[i] = c; - x[i+1] = c; - x[i+2] = c; - x[i+3] = c; - } - - for(; i < n; i++) - x[i] = c; - -} - -static void THFloatVector_cmul_NEON(float *z, const float *x, const float* y, const ptrdiff_t n) { - long i = 0; - - for(; i < n-4; i += 4) - { - z[i] = x[i] * y[i]; - z[i+1] = x[i+1] * y[i+1]; - z[i+2] = x[i+2] * y[i+2]; - z[i+3] = x[i+3] * y[i+3]; - } - - for(; i < n; i++) - z[i] = x[i] * y[i]; -} - -static void THFloatVector_muls_NEON(float *y, const float *x, const float c, const ptrdiff_t n) { - long i = 0; - - for(; i < n-4; i += 4) - { - y[i] = x[i] * c; - y[i+1] = x[i+1] * c; - y[i+2] = x[i+2] * c; - y[i+3] = x[i+3] * c; - } - - for(; i < n; i++) - y[i] = x[i] * c; -} - -static void THFloatVector_cadd_NEON(float *z, const float *x, const float *y, const float c, const ptrdiff_t n) { - long i = 0; - - for(;i < n-4; i += 4) - { - z[i] = x[i] + c * y[i]; - z[i+1] = x[i+1] + c * y[i+1]; - z[i+2] = x[i+2] + c * y[i+2]; - z[i+3] = x[i+3] + c * y[i+3]; - } - - for(; i < n; i++) - z[i] = x[i] + c * y[i]; -} - -static void THFloatVector_adds_NEON(float *y, const float *x, const float c, const ptrdiff_t n) { - long i = 0; - - for(;i < n-4; i += 4) - { - y[i] = x[i] + c; - y[i+1] = x[i+1] + c; - y[i+2] = x[i+2] + c; - y[i+3] = x[i+3] + c; - } - - for(; i < n; i++) - y[i] = x[i] + c; -} - -static void THFloatVector_cdiv_NEON(float *z, const float *x, const float *y, const ptrdiff_t n) { - long i = 0; - - for(;i < n-4; i += 4) - { - z[i] = x[i] / y[i]; - z[i+1] = x[i+1] / y[i+1]; - z[i+2] = x[i+2] / y[i+2]; - z[i+3] = x[i+3] / y[i+3]; - } - - for(; i < n; i++) - z[i] = x[i] / y[i]; -} - -static void THFloatVector_divs_NEON(float *y, const float *x, const float c, const ptrdiff_t n) { - long i = 0; - - for(;i < n-4; i += 4) - { - y[i] = x[i] / c; - y[i+1] = x[i+1] / c; - y[i+2] = x[i+2] / c; - y[i+3] = x[i+3] / c; - } - - for(; i < n; i++) - y[i] = x[i] / c; -} diff --git 
a/contrib/lua-torch/torch7/lib/TH/vector/SSE.c b/contrib/lua-torch/torch7/lib/TH/vector/SSE.c deleted file mode 100644 index d026935ab0..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/vector/SSE.c +++ /dev/null @@ -1,268 +0,0 @@ -#ifndef _MSC_VER -#include -#else -#include -#endif - -static void THDoubleVector_fill_SSE(double *x, const double c, const ptrdiff_t n) { - ptrdiff_t i; - ptrdiff_t off; - __m128d XMM0 = _mm_set1_pd(c); - for (i=0; i<=((n)-8); i+=8) { - _mm_storeu_pd((x)+i , XMM0); - _mm_storeu_pd((x)+i+2, XMM0); - _mm_storeu_pd((x)+i+4, XMM0); - _mm_storeu_pd((x)+i+6, XMM0); - } - off = (n) - ((n)%8); - for (i=0; i<((n)%8); i++) { - x[off+i] = c; - } -} - -static void THDoubleVector_cadd_SSE(double *z, const double *x, const double *y, const double c, const ptrdiff_t n) { - ptrdiff_t i; - __m128d XMM7 = _mm_set1_pd(c); - __m128d XMM0, XMM2; - for (i=0; i<=((n)-2); i+=2) { - XMM0 = _mm_loadu_pd((x)+i); - XMM2 = _mm_loadu_pd((y)+i); - XMM2 = _mm_mul_pd(XMM2, XMM7); - XMM2 = _mm_add_pd(XMM0, XMM2); - _mm_storeu_pd((z)+i, XMM2); - } - for (; i<(n); i++) { - z[i] = x[i] + c * y[i]; - } -} - -static void THDoubleVector_adds_SSE(double *y, const double *x, const double c, const ptrdiff_t n) { - ptrdiff_t i; - __m128d XMM7 = _mm_set1_pd(c); - __m128d XMM0, XMM2; - for (i=0; i<=((n)-4); i+=4) { - XMM0 = _mm_loadu_pd((x)+i); - XMM2 = _mm_loadu_pd((x)+i+2); - XMM0 = _mm_add_pd(XMM0, XMM7); - XMM2 = _mm_add_pd(XMM2, XMM7); - _mm_storeu_pd((y)+i, XMM0); - _mm_storeu_pd((y)+i+2, XMM2); - } - for (; i<(n); i++) { - y[i] = x[i] + c; - } -} - -static void THDoubleVector_cmul_SSE(double *z, const double *x, const double *y, const ptrdiff_t n) { - ptrdiff_t i; - for (i=0; i<=((n)-8); i+=8) { - __m128d XMM0 = _mm_loadu_pd((x)+i ); - __m128d XMM1 = _mm_loadu_pd((x)+i+2); - __m128d XMM2 = _mm_loadu_pd((x)+i+4); - __m128d XMM3 = _mm_loadu_pd((x)+i+6); - __m128d XMM4 = _mm_loadu_pd((y)+i ); - __m128d XMM5 = _mm_loadu_pd((y)+i+2); - __m128d XMM6 = _mm_loadu_pd((y)+i+4); - __m128d XMM7 = _mm_loadu_pd((y)+i+6); - XMM4 = _mm_mul_pd(XMM4, XMM0); - XMM5 = _mm_mul_pd(XMM5, XMM1); - XMM6 = _mm_mul_pd(XMM6, XMM2); - XMM7 = _mm_mul_pd(XMM7, XMM3); - _mm_storeu_pd((z)+i , XMM4); - _mm_storeu_pd((z)+i+2, XMM5); - _mm_storeu_pd((z)+i+4, XMM6); - _mm_storeu_pd((z)+i+6, XMM7); - } - for (; i<(n); i++) { - z[i] = x[i] * y[i]; - } -} - -static void THDoubleVector_muls_SSE(double *y, const double *x, const double c, const ptrdiff_t n) { - ptrdiff_t i; - __m128d XMM15 = _mm_set1_pd(c); - for (i=0; i<=((n)-8); i+=8) { - __m128d XMM0 = _mm_loadu_pd((x)+i ); - __m128d XMM1 = _mm_loadu_pd((x)+i+2); - __m128d XMM2 = _mm_loadu_pd((x)+i+4); - __m128d XMM3 = _mm_loadu_pd((x)+i+6); - __m128d XMM4 = _mm_mul_pd(XMM15, XMM0); - __m128d XMM5 = _mm_mul_pd(XMM15, XMM1); - __m128d XMM6 = _mm_mul_pd(XMM15, XMM2); - __m128d XMM7 = _mm_mul_pd(XMM15, XMM3); - _mm_storeu_pd((y)+i , XMM4); - _mm_storeu_pd((y)+i+2, XMM5); - _mm_storeu_pd((y)+i+4, XMM6); - _mm_storeu_pd((y)+i+6, XMM7); - } - for (; i<(n); i++) { - y[i] = x[i] * c; - } -} - -static void THDoubleVector_cdiv_SSE(double *z, const double *x, const double *y, const ptrdiff_t n) { - ptrdiff_t i; - __m128d XMM0, XMM1, XMM2, XMM3; - for (i=0; i<=((n)-4); i+=4) { - XMM0 = _mm_loadu_pd(x+i); - XMM1 = _mm_loadu_pd(x+i+2); - XMM2 = _mm_loadu_pd(y+i); - XMM3 = _mm_loadu_pd(y+i+2); - XMM2 = _mm_div_pd(XMM0, XMM2); - XMM3 = _mm_div_pd(XMM1, XMM3); - _mm_storeu_pd(z+i, XMM2); - _mm_storeu_pd(z+i+2, XMM3); - } - for (; i<(n); i++) { - z[i] = x[i] / y[i]; - } -} - -static void 
THDoubleVector_divs_SSE(double *y, const double *x, const double c, const ptrdiff_t n) { - ptrdiff_t i; - __m128d XMM7 = _mm_set1_pd(c); - __m128d XMM0, XMM1; - for (i=0; i<=((n)-4); i+=4) { - XMM0 = _mm_loadu_pd(x+i); - XMM1 = _mm_loadu_pd(x+i+2); - XMM0 = _mm_div_pd(XMM0, XMM7); - XMM1 = _mm_div_pd(XMM1, XMM7); - _mm_storeu_pd(y+i, XMM0); - _mm_storeu_pd(y+i+2, XMM1); - } - for (; i<(n); i++) { - y[i] = x[i] / c; - } -} - -static void THFloatVector_fill_SSE(float *x, const float c, const ptrdiff_t n) { - ptrdiff_t i; - __m128 XMM0 = _mm_set_ps1(c); - ptrdiff_t off; - for (i=0; i<=((n)-16); i+=16) { - _mm_storeu_ps((x)+i , XMM0); - _mm_storeu_ps((x)+i+4, XMM0); - _mm_storeu_ps((x)+i+8, XMM0); - _mm_storeu_ps((x)+i+12, XMM0); - } - off = (n) - ((n)%16); - for (i=0; i<((n)%16); i++) { - x[off+i] = c; - } -} - - -static void THFloatVector_cadd_SSE(float *z, const float *x, const float *y, const float c, const ptrdiff_t n) { - ptrdiff_t i; - __m128 XMM7 = _mm_set_ps1(c); - __m128 XMM0, XMM2; - for (i=0; i<=((n)-4); i+=4) { - XMM0 = _mm_loadu_ps((x)+i); - XMM2 = _mm_loadu_ps((y)+i); - XMM2 = _mm_mul_ps(XMM2, XMM7); - XMM2 = _mm_add_ps(XMM0, XMM2); - _mm_storeu_ps((z)+i, XMM2); - } - for (; i<(n); i++) { - z[i] = x[i] + c * y[i]; - } -} - -static void THFloatVector_adds_SSE(float *y, const float *x, const float c, const ptrdiff_t n) { - ptrdiff_t i; - __m128 XMM7 = _mm_set1_ps(c); - __m128 XMM0, XMM2; - for (i=0; i<=((n)-8); i+=8) { - XMM0 = _mm_loadu_ps((x)+i); - XMM2 = _mm_loadu_ps((x)+i+4); - XMM0 = _mm_add_ps(XMM0, XMM7); - XMM2 = _mm_add_ps(XMM2, XMM7); - _mm_storeu_ps((y)+i, XMM0); - _mm_storeu_ps((y)+i+4, XMM2); - } - for (; i<(n); i++) { - y[i] = x[i] + c; - } -} - -static void THFloatVector_cmul_SSE(float *z, const float *x, const float *y, const ptrdiff_t n) { - ptrdiff_t i; - for (i=0; i<=((n)-16); i+=16) { - __m128 XMM0 = _mm_loadu_ps((x)+i ); - __m128 XMM1 = _mm_loadu_ps((x)+i+ 4); - __m128 XMM2 = _mm_loadu_ps((x)+i+ 8); - __m128 XMM3 = _mm_loadu_ps((x)+i+12); - __m128 XMM4 = _mm_loadu_ps((y)+i ); - __m128 XMM5 = _mm_loadu_ps((y)+i+ 4); - __m128 XMM6 = _mm_loadu_ps((y)+i+ 8); - __m128 XMM7 = _mm_loadu_ps((y)+i+12); - XMM4 = _mm_mul_ps(XMM4, XMM0); - XMM5 = _mm_mul_ps(XMM5, XMM1); - XMM6 = _mm_mul_ps(XMM6, XMM2); - XMM7 = _mm_mul_ps(XMM7, XMM3); - _mm_storeu_ps((z)+i , XMM4); - _mm_storeu_ps((z)+i+ 4, XMM5); - _mm_storeu_ps((z)+i+ 8, XMM6); - _mm_storeu_ps((z)+i+12, XMM7); - } - for (; i<(n); i++) { - z[i] = x[i] * y[i]; - } -} - -static void THFloatVector_muls_SSE(float *y, const float *x, const float c, const ptrdiff_t n) { - ptrdiff_t i; - __m128 XMM15 = _mm_set_ps1(c); - for (i=0; i<=((n)-16); i+=16) { - __m128 XMM0 = _mm_loadu_ps((x)+i ); - __m128 XMM1 = _mm_loadu_ps((x)+i+ 4); - __m128 XMM2 = _mm_loadu_ps((x)+i+ 8); - __m128 XMM3 = _mm_loadu_ps((x)+i+12); - __m128 XMM4 = _mm_mul_ps(XMM15, XMM0); - __m128 XMM5 = _mm_mul_ps(XMM15, XMM1); - __m128 XMM6 = _mm_mul_ps(XMM15, XMM2); - __m128 XMM7 = _mm_mul_ps(XMM15, XMM3); - _mm_storeu_ps((y)+i , XMM4); - _mm_storeu_ps((y)+i+ 4, XMM5); - _mm_storeu_ps((y)+i+ 8, XMM6); - _mm_storeu_ps((y)+i+12, XMM7); - } - for (; i<(n); i++) { - y[i] = x[i] * c; - } -} - -static void THFloatVector_cdiv_SSE(float *z, const float *x, const float *y, const ptrdiff_t n) { - ptrdiff_t i; - __m128 XMM0, XMM1, XMM2, XMM3; - for (i=0; i<=((n)-8); i+=8) { - XMM0 = _mm_loadu_ps(x+i); - XMM1 = _mm_loadu_ps(x+i+4); - XMM2 = _mm_loadu_ps(y+i); - XMM3 = _mm_loadu_ps(y+i+4); - XMM2 = _mm_div_ps(XMM0, XMM2); - XMM3 = _mm_div_ps(XMM1, XMM3); - _mm_storeu_ps(z+i, 
XMM2); - _mm_storeu_ps(z+i+4, XMM3); - } - for (; i<(n); i++) { - z[i] = x[i] / y[i]; - } -} - -static void THFloatVector_divs_SSE(float *y, const float *x, const float c, const ptrdiff_t n) { - ptrdiff_t i; - __m128 XMM7 = _mm_set1_ps(c); - __m128 XMM0, XMM1; - for (i=0; i<=((n)-8); i+=8) { - XMM0 = _mm_loadu_ps(x+i); - XMM1 = _mm_loadu_ps(x+i+4); - XMM0 = _mm_div_ps(XMM0, XMM7); - XMM1 = _mm_div_ps(XMM1, XMM7); - _mm_storeu_ps(y+i, XMM0); - _mm_storeu_ps(y+i+4, XMM1); - } - for (; i<(n); i++) { - y[i] = x[i] / c; - } -} diff --git a/contrib/lua-torch/torch7/lib/TH/vector/VSX.c b/contrib/lua-torch/torch7/lib/TH/vector/VSX.c deleted file mode 100644 index 9ff984ad79..0000000000 --- a/contrib/lua-torch/torch7/lib/TH/vector/VSX.c +++ /dev/null @@ -1,2520 +0,0 @@ -#ifdef __PPC64__ -#include -#include - - -//-------------------------------------------------------------------------------------------------- -// THDoubleVector_fill_VSX: -//-------------------------------------------------------------------------------------------------- -static void THDoubleVector_fill_VSX(double *x, const double c, const ptrdiff_t n) -{ - ptrdiff_t i; - - double val[2] = {c, c}; - vector double fp64vec2 = vec_xl(0, val); - - for (i = 0; i <= n-128; i += 128) - { - vec_xst(fp64vec2, 0, x+(i )); - vec_xst(fp64vec2, 0, x+(i+2 )); - vec_xst(fp64vec2, 0, x+(i+4 )); - vec_xst(fp64vec2, 0, x+(i+6 )); - vec_xst(fp64vec2, 0, x+(i+8 )); - vec_xst(fp64vec2, 0, x+(i+10 )); - vec_xst(fp64vec2, 0, x+(i+12 )); - vec_xst(fp64vec2, 0, x+(i+14 )); - vec_xst(fp64vec2, 0, x+(i+16 )); - vec_xst(fp64vec2, 0, x+(i+18 )); - vec_xst(fp64vec2, 0, x+(i+20 )); - vec_xst(fp64vec2, 0, x+(i+22 )); - vec_xst(fp64vec2, 0, x+(i+24 )); - vec_xst(fp64vec2, 0, x+(i+26 )); - vec_xst(fp64vec2, 0, x+(i+28 )); - vec_xst(fp64vec2, 0, x+(i+30 )); - vec_xst(fp64vec2, 0, x+(i+32 )); - vec_xst(fp64vec2, 0, x+(i+34 )); - vec_xst(fp64vec2, 0, x+(i+36 )); - vec_xst(fp64vec2, 0, x+(i+38 )); - vec_xst(fp64vec2, 0, x+(i+40 )); - vec_xst(fp64vec2, 0, x+(i+42 )); - vec_xst(fp64vec2, 0, x+(i+44 )); - vec_xst(fp64vec2, 0, x+(i+46 )); - vec_xst(fp64vec2, 0, x+(i+48 )); - vec_xst(fp64vec2, 0, x+(i+50 )); - vec_xst(fp64vec2, 0, x+(i+52 )); - vec_xst(fp64vec2, 0, x+(i+54 )); - vec_xst(fp64vec2, 0, x+(i+56 )); - vec_xst(fp64vec2, 0, x+(i+58 )); - vec_xst(fp64vec2, 0, x+(i+60 )); - vec_xst(fp64vec2, 0, x+(i+62 )); - vec_xst(fp64vec2, 0, x+(i+64 )); - vec_xst(fp64vec2, 0, x+(i+66 )); - vec_xst(fp64vec2, 0, x+(i+68 )); - vec_xst(fp64vec2, 0, x+(i+70 )); - vec_xst(fp64vec2, 0, x+(i+72 )); - vec_xst(fp64vec2, 0, x+(i+74 )); - vec_xst(fp64vec2, 0, x+(i+76 )); - vec_xst(fp64vec2, 0, x+(i+78 )); - vec_xst(fp64vec2, 0, x+(i+80 )); - vec_xst(fp64vec2, 0, x+(i+82 )); - vec_xst(fp64vec2, 0, x+(i+84 )); - vec_xst(fp64vec2, 0, x+(i+86 )); - vec_xst(fp64vec2, 0, x+(i+88 )); - vec_xst(fp64vec2, 0, x+(i+90 )); - vec_xst(fp64vec2, 0, x+(i+92 )); - vec_xst(fp64vec2, 0, x+(i+94 )); - vec_xst(fp64vec2, 0, x+(i+96 )); - vec_xst(fp64vec2, 0, x+(i+98 )); - vec_xst(fp64vec2, 0, x+(i+100)); - vec_xst(fp64vec2, 0, x+(i+102)); - vec_xst(fp64vec2, 0, x+(i+104)); - vec_xst(fp64vec2, 0, x+(i+106)); - vec_xst(fp64vec2, 0, x+(i+108)); - vec_xst(fp64vec2, 0, x+(i+110)); - vec_xst(fp64vec2, 0, x+(i+112)); - vec_xst(fp64vec2, 0, x+(i+114)); - vec_xst(fp64vec2, 0, x+(i+116)); - vec_xst(fp64vec2, 0, x+(i+118)); - vec_xst(fp64vec2, 0, x+(i+120)); - vec_xst(fp64vec2, 0, x+(i+122)); - vec_xst(fp64vec2, 0, x+(i+124)); - vec_xst(fp64vec2, 0, x+(i+126)); - } - for (; i <= n-16; i += 16) - { - vec_xst(fp64vec2, 0, 
x+(i )); - vec_xst(fp64vec2, 0, x+(i+2 )); - vec_xst(fp64vec2, 0, x+(i+4 )); - vec_xst(fp64vec2, 0, x+(i+6 )); - vec_xst(fp64vec2, 0, x+(i+8 )); - vec_xst(fp64vec2, 0, x+(i+10 )); - vec_xst(fp64vec2, 0, x+(i+12 )); - vec_xst(fp64vec2, 0, x+(i+14 )); - } - for (; i <= n-2; i += 2) - vec_xst(fp64vec2, 0, x+(i )); - for (; i < n; i++) - x[i] = c; -} - - -//-------------------------------------------------------------------------------------------------- -// THDoubleVector_cadds_VSX: -//-------------------------------------------------------------------------------------------------- -static void THDoubleVector_cadd_VSX(double *z, const double *x, const double *y, const double c, const ptrdiff_t n) -{ - ptrdiff_t i; - - double val[2] = {c, c}; - vector double c_fp64vec2 = vec_xl(0, val); - - vector double y0_fp64vec2, y1_fp64vec2, y2_fp64vec2, y3_fp64vec2, y4_fp64vec2, y5_fp64vec2, y6_fp64vec2, y7_fp64vec2; - vector double y8_fp64vec2, y9_fp64vec2, y10_fp64vec2, y11_fp64vec2; - vector double x0_fp64vec2, x1_fp64vec2, x2_fp64vec2, x3_fp64vec2, x4_fp64vec2, x5_fp64vec2, x6_fp64vec2, x7_fp64vec2; - vector double x8_fp64vec2, x9_fp64vec2, x10_fp64vec2, x11_fp64vec2; - - - for (i = 0; i <= n-24; i += 24) - { - y0_fp64vec2 = vec_xl(0, y+(i )); - y1_fp64vec2 = vec_xl(0, y+(i+2 )); - y2_fp64vec2 = vec_xl(0, y+(i+4 )); - y3_fp64vec2 = vec_xl(0, y+(i+6 )); - y4_fp64vec2 = vec_xl(0, y+(i+8 )); - y5_fp64vec2 = vec_xl(0, y+(i+10)); - y6_fp64vec2 = vec_xl(0, y+(i+12)); - y7_fp64vec2 = vec_xl(0, y+(i+14)); - y8_fp64vec2 = vec_xl(0, y+(i+16)); - y9_fp64vec2 = vec_xl(0, y+(i+18)); - y10_fp64vec2 = vec_xl(0, y+(i+20)); - y11_fp64vec2 = vec_xl(0, y+(i+22)); - - x0_fp64vec2 = vec_xl(0, x+(i )); - x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - x4_fp64vec2 = vec_xl(0, x+(i+8 )); - x5_fp64vec2 = vec_xl(0, x+(i+10)); - x6_fp64vec2 = vec_xl(0, x+(i+12)); - x7_fp64vec2 = vec_xl(0, x+(i+14)); - x8_fp64vec2 = vec_xl(0, x+(i+16)); - x9_fp64vec2 = vec_xl(0, x+(i+18)); - x10_fp64vec2 = vec_xl(0, x+(i+20)); - x11_fp64vec2 = vec_xl(0, x+(i+22)); - - y0_fp64vec2 = vec_madd(y0_fp64vec2, c_fp64vec2, x0_fp64vec2); - y1_fp64vec2 = vec_madd(y1_fp64vec2, c_fp64vec2, x1_fp64vec2); - y2_fp64vec2 = vec_madd(y2_fp64vec2, c_fp64vec2, x2_fp64vec2); - y3_fp64vec2 = vec_madd(y3_fp64vec2, c_fp64vec2, x3_fp64vec2); - y4_fp64vec2 = vec_madd(y4_fp64vec2, c_fp64vec2, x4_fp64vec2); - y5_fp64vec2 = vec_madd(y5_fp64vec2, c_fp64vec2, x5_fp64vec2); - y6_fp64vec2 = vec_madd(y6_fp64vec2, c_fp64vec2, x6_fp64vec2); - y7_fp64vec2 = vec_madd(y7_fp64vec2, c_fp64vec2, x7_fp64vec2); - y8_fp64vec2 = vec_madd(y8_fp64vec2, c_fp64vec2, x8_fp64vec2); - y9_fp64vec2 = vec_madd(y9_fp64vec2, c_fp64vec2, x9_fp64vec2); - y10_fp64vec2 = vec_madd(y10_fp64vec2, c_fp64vec2,x10_fp64vec2); - y11_fp64vec2 = vec_madd(y11_fp64vec2, c_fp64vec2,x11_fp64vec2); - - vec_xst(y0_fp64vec2, 0, z+(i )); - vec_xst(y1_fp64vec2, 0, z+(i+2 )); - vec_xst(y2_fp64vec2, 0, z+(i+4 )); - vec_xst(y3_fp64vec2, 0, z+(i+6 )); - vec_xst(y4_fp64vec2, 0, z+(i+8 )); - vec_xst(y5_fp64vec2, 0, z+(i+10)); - vec_xst(y6_fp64vec2, 0, z+(i+12)); - vec_xst(y7_fp64vec2, 0, z+(i+14)); - vec_xst(y8_fp64vec2, 0, z+(i+16)); - vec_xst(y9_fp64vec2, 0, z+(i+18)); - vec_xst(y10_fp64vec2, 0, z+(i+20)); - vec_xst(y11_fp64vec2, 0, z+(i+22)); - } - for (; i <= n-8; i += 8) - { - y0_fp64vec2 = vec_xl(0, y+(i )); - y1_fp64vec2 = vec_xl(0, y+(i+2 )); - y2_fp64vec2 = vec_xl(0, y+(i+4 )); - y3_fp64vec2 = vec_xl(0, y+(i+6 )); - - x0_fp64vec2 = vec_xl(0, x+(i )); - 
x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - - y0_fp64vec2 = vec_madd(y0_fp64vec2, c_fp64vec2, x0_fp64vec2); - y1_fp64vec2 = vec_madd(y1_fp64vec2, c_fp64vec2, x1_fp64vec2); - y2_fp64vec2 = vec_madd(y2_fp64vec2, c_fp64vec2, x2_fp64vec2); - y3_fp64vec2 = vec_madd(y3_fp64vec2, c_fp64vec2, x3_fp64vec2); - - vec_xst(y0_fp64vec2, 0, z+(i )); - vec_xst(y1_fp64vec2, 0, z+(i+2 )); - vec_xst(y2_fp64vec2, 0, z+(i+4 )); - vec_xst(y3_fp64vec2, 0, z+(i+6 )); - } - for (; i <= n-2; i += 2) - { - y0_fp64vec2 = vec_xl(0, y+(i )); - x0_fp64vec2 = vec_xl(0, x+(i )); - y0_fp64vec2 = vec_madd(y0_fp64vec2, c_fp64vec2, x0_fp64vec2); - vec_xst(y0_fp64vec2, 0, z+(i )); - } - for (; i < n; i++) - z[i] = x[i] + c* y[i]; -} - - -//-------------------------------------------------------------------------------------------------- -// THDoubleVector_adds_VSX: -//-------------------------------------------------------------------------------------------------- -static void THDoubleVector_adds_VSX(double *y, const double *x, const double c, const ptrdiff_t n) -{ - ptrdiff_t i; - - double val[2] = {c, c}; - vector double c_fp64vec2 = vec_xl(0, val); - - vector double y0_fp64vec2, y1_fp64vec2, y2_fp64vec2, y3_fp64vec2, y4_fp64vec2, y5_fp64vec2, y6_fp64vec2, y7_fp64vec2; - vector double y8_fp64vec2, y9_fp64vec2, y10_fp64vec2, y11_fp64vec2; - vector double x0_fp64vec2, x1_fp64vec2, x2_fp64vec2, x3_fp64vec2, x4_fp64vec2, x5_fp64vec2, x6_fp64vec2, x7_fp64vec2; - vector double x8_fp64vec2, x9_fp64vec2, x10_fp64vec2, x11_fp64vec2; - - - for (i = 0; i <= n-24; i += 24) - { - x0_fp64vec2 = vec_xl(0, x+(i )); - x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - x4_fp64vec2 = vec_xl(0, x+(i+8 )); - x5_fp64vec2 = vec_xl(0, x+(i+10)); - x6_fp64vec2 = vec_xl(0, x+(i+12)); - x7_fp64vec2 = vec_xl(0, x+(i+14)); - x8_fp64vec2 = vec_xl(0, x+(i+16)); - x9_fp64vec2 = vec_xl(0, x+(i+18)); - x10_fp64vec2 = vec_xl(0, x+(i+20)); - x11_fp64vec2 = vec_xl(0, x+(i+22)); - - y0_fp64vec2 = vec_add(x0_fp64vec2, c_fp64vec2); - y1_fp64vec2 = vec_add(x1_fp64vec2, c_fp64vec2); - y2_fp64vec2 = vec_add(x2_fp64vec2, c_fp64vec2); - y3_fp64vec2 = vec_add(x3_fp64vec2, c_fp64vec2); - y4_fp64vec2 = vec_add(x4_fp64vec2, c_fp64vec2); - y5_fp64vec2 = vec_add(x5_fp64vec2, c_fp64vec2); - y6_fp64vec2 = vec_add(x6_fp64vec2, c_fp64vec2); - y7_fp64vec2 = vec_add(x7_fp64vec2, c_fp64vec2); - y8_fp64vec2 = vec_add(x8_fp64vec2, c_fp64vec2); - y9_fp64vec2 = vec_add(x9_fp64vec2, c_fp64vec2); - y10_fp64vec2 = vec_add(x10_fp64vec2, c_fp64vec2); - y11_fp64vec2 = vec_add(x11_fp64vec2, c_fp64vec2); - - - vec_xst(y0_fp64vec2, 0, y+(i )); - vec_xst(y1_fp64vec2, 0, y+(i+2 )); - vec_xst(y2_fp64vec2, 0, y+(i+4 )); - vec_xst(y3_fp64vec2, 0, y+(i+6 )); - vec_xst(y4_fp64vec2, 0, y+(i+8 )); - vec_xst(y5_fp64vec2, 0, y+(i+10)); - vec_xst(y6_fp64vec2, 0, y+(i+12)); - vec_xst(y7_fp64vec2, 0, y+(i+14)); - vec_xst(y8_fp64vec2, 0, y+(i+16)); - vec_xst(y9_fp64vec2, 0, y+(i+18)); - vec_xst(y10_fp64vec2, 0, y+(i+20)); - vec_xst(y11_fp64vec2, 0, y+(i+22)); - } - for (; i <= n-8; i += 8) - { - x0_fp64vec2 = vec_xl(0, x+(i )); - x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - - y0_fp64vec2 = vec_add(x0_fp64vec2, c_fp64vec2); - y1_fp64vec2 = vec_add(x1_fp64vec2, c_fp64vec2); - y2_fp64vec2 = vec_add(x2_fp64vec2, c_fp64vec2); - y3_fp64vec2 = vec_add(x3_fp64vec2, c_fp64vec2); - - vec_xst(y0_fp64vec2, 0, y+(i )); - 
vec_xst(y1_fp64vec2, 0, y+(i+2 )); - vec_xst(y2_fp64vec2, 0, y+(i+4 )); - vec_xst(y3_fp64vec2, 0, y+(i+6 )); - } - for (; i <= n-2; i += 2) - { - x0_fp64vec2 = vec_xl(0, x+(i )); - y0_fp64vec2 = vec_add(x0_fp64vec2, c_fp64vec2); - vec_xst(y0_fp64vec2, 0, y+(i )); - } - for (; i < n; i++) - y[i] = x[i] +c; -} - - -//-------------------------------------------------------------------------------------------------- -// THDoubleVector_cmul_VSX: -//-------------------------------------------------------------------------------------------------- -static void THDoubleVector_cmul_VSX(double *z, const double *x, const double *y, const ptrdiff_t n) -{ - ptrdiff_t i; - - vector double y0_fp64vec2, y1_fp64vec2, y2_fp64vec2, y3_fp64vec2, y4_fp64vec2, y5_fp64vec2, y6_fp64vec2, y7_fp64vec2; - vector double y8_fp64vec2, y9_fp64vec2, y10_fp64vec2, y11_fp64vec2; - vector double x0_fp64vec2, x1_fp64vec2, x2_fp64vec2, x3_fp64vec2, x4_fp64vec2, x5_fp64vec2, x6_fp64vec2, x7_fp64vec2; - vector double x8_fp64vec2, x9_fp64vec2, x10_fp64vec2, x11_fp64vec2; - - - for (i = 0; i <= n-24; i += 24) - { - y0_fp64vec2 = vec_xl(0, y+(i )); - y1_fp64vec2 = vec_xl(0, y+(i+2 )); - y2_fp64vec2 = vec_xl(0, y+(i+4 )); - y3_fp64vec2 = vec_xl(0, y+(i+6 )); - y4_fp64vec2 = vec_xl(0, y+(i+8 )); - y5_fp64vec2 = vec_xl(0, y+(i+10)); - y6_fp64vec2 = vec_xl(0, y+(i+12)); - y7_fp64vec2 = vec_xl(0, y+(i+14)); - y8_fp64vec2 = vec_xl(0, y+(i+16)); - y9_fp64vec2 = vec_xl(0, y+(i+18)); - y10_fp64vec2 = vec_xl(0, y+(i+20)); - y11_fp64vec2 = vec_xl(0, y+(i+22)); - - x0_fp64vec2 = vec_xl(0, x+(i )); - x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - x4_fp64vec2 = vec_xl(0, x+(i+8 )); - x5_fp64vec2 = vec_xl(0, x+(i+10)); - x6_fp64vec2 = vec_xl(0, x+(i+12)); - x7_fp64vec2 = vec_xl(0, x+(i+14)); - x8_fp64vec2 = vec_xl(0, x+(i+16)); - x9_fp64vec2 = vec_xl(0, x+(i+18)); - x10_fp64vec2 = vec_xl(0, x+(i+20)); - x11_fp64vec2 = vec_xl(0, x+(i+22)); - - y0_fp64vec2 = vec_mul(y0_fp64vec2, x0_fp64vec2); - y1_fp64vec2 = vec_mul(y1_fp64vec2, x1_fp64vec2); - y2_fp64vec2 = vec_mul(y2_fp64vec2, x2_fp64vec2); - y3_fp64vec2 = vec_mul(y3_fp64vec2, x3_fp64vec2); - y4_fp64vec2 = vec_mul(y4_fp64vec2, x4_fp64vec2); - y5_fp64vec2 = vec_mul(y5_fp64vec2, x5_fp64vec2); - y6_fp64vec2 = vec_mul(y6_fp64vec2, x6_fp64vec2); - y7_fp64vec2 = vec_mul(y7_fp64vec2, x7_fp64vec2); - y8_fp64vec2 = vec_mul(y8_fp64vec2, x8_fp64vec2); - y9_fp64vec2 = vec_mul(y9_fp64vec2, x9_fp64vec2); - y10_fp64vec2 = vec_mul(y10_fp64vec2, x10_fp64vec2); - y11_fp64vec2 = vec_mul(y11_fp64vec2, x11_fp64vec2); - - vec_xst(y0_fp64vec2, 0, z+(i )); - vec_xst(y1_fp64vec2, 0, z+(i+2 )); - vec_xst(y2_fp64vec2, 0, z+(i+4 )); - vec_xst(y3_fp64vec2, 0, z+(i+6 )); - vec_xst(y4_fp64vec2, 0, z+(i+8 )); - vec_xst(y5_fp64vec2, 0, z+(i+10)); - vec_xst(y6_fp64vec2, 0, z+(i+12)); - vec_xst(y7_fp64vec2, 0, z+(i+14)); - vec_xst(y8_fp64vec2, 0, z+(i+16)); - vec_xst(y9_fp64vec2, 0, z+(i+18)); - vec_xst(y10_fp64vec2, 0, z+(i+20)); - vec_xst(y11_fp64vec2, 0, z+(i+22)); - } - for (; i <= n-8; i += 8) - { - y0_fp64vec2 = vec_xl(0, y+(i )); - y1_fp64vec2 = vec_xl(0, y+(i+2 )); - y2_fp64vec2 = vec_xl(0, y+(i+4 )); - y3_fp64vec2 = vec_xl(0, y+(i+6 )); - - x0_fp64vec2 = vec_xl(0, x+(i )); - x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - - y0_fp64vec2 = vec_mul(y0_fp64vec2, x0_fp64vec2); - y1_fp64vec2 = vec_mul(y1_fp64vec2, x1_fp64vec2); - y2_fp64vec2 = vec_mul(y2_fp64vec2, x2_fp64vec2); - y3_fp64vec2 = 
vec_mul(y3_fp64vec2, x3_fp64vec2); - - vec_xst(y0_fp64vec2, 0, z+(i )); - vec_xst(y1_fp64vec2, 0, z+(i+2 )); - vec_xst(y2_fp64vec2, 0, z+(i+4 )); - vec_xst(y3_fp64vec2, 0, z+(i+6 )); - } - for (; i <= n-2; i += 2) - { - y0_fp64vec2 = vec_xl(0, y+(i )); - x0_fp64vec2 = vec_xl(0, x+(i )); - y0_fp64vec2 = vec_mul(y0_fp64vec2, x0_fp64vec2); - vec_xst(y0_fp64vec2, 0, z+(i )); - } - for (; i < n; i++) - z[i] = x[i] * y[i]; -} - - -//-------------------------------------------------------------------------------------------------- -// THDoubleVector_muls_VSX: -//-------------------------------------------------------------------------------------------------- -static void THDoubleVector_muls_VSX(double *y, const double *x, const double c, const ptrdiff_t n) -{ - ptrdiff_t i; - - double val[2] = {c, c}; - vector double c_fp64vec2 = vec_xl(0, val); - - vector double y0_fp64vec2, y1_fp64vec2, y2_fp64vec2, y3_fp64vec2, y4_fp64vec2, y5_fp64vec2, y6_fp64vec2, y7_fp64vec2; - vector double y8_fp64vec2, y9_fp64vec2, y10_fp64vec2, y11_fp64vec2; - vector double x0_fp64vec2, x1_fp64vec2, x2_fp64vec2, x3_fp64vec2, x4_fp64vec2, x5_fp64vec2, x6_fp64vec2, x7_fp64vec2; - vector double x8_fp64vec2, x9_fp64vec2, x10_fp64vec2, x11_fp64vec2; - - - for (i = 0; i <= n-24; i += 24) - { - x0_fp64vec2 = vec_xl(0, x+(i )); - x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - x4_fp64vec2 = vec_xl(0, x+(i+8 )); - x5_fp64vec2 = vec_xl(0, x+(i+10)); - x6_fp64vec2 = vec_xl(0, x+(i+12)); - x7_fp64vec2 = vec_xl(0, x+(i+14)); - x8_fp64vec2 = vec_xl(0, x+(i+16)); - x9_fp64vec2 = vec_xl(0, x+(i+18)); - x10_fp64vec2 = vec_xl(0, x+(i+20)); - x11_fp64vec2 = vec_xl(0, x+(i+22)); - - y0_fp64vec2 = vec_mul(x0_fp64vec2, c_fp64vec2); - y1_fp64vec2 = vec_mul(x1_fp64vec2, c_fp64vec2); - y2_fp64vec2 = vec_mul(x2_fp64vec2, c_fp64vec2); - y3_fp64vec2 = vec_mul(x3_fp64vec2, c_fp64vec2); - y4_fp64vec2 = vec_mul(x4_fp64vec2, c_fp64vec2); - y5_fp64vec2 = vec_mul(x5_fp64vec2, c_fp64vec2); - y6_fp64vec2 = vec_mul(x6_fp64vec2, c_fp64vec2); - y7_fp64vec2 = vec_mul(x7_fp64vec2, c_fp64vec2); - y8_fp64vec2 = vec_mul(x8_fp64vec2, c_fp64vec2); - y9_fp64vec2 = vec_mul(x9_fp64vec2, c_fp64vec2); - y10_fp64vec2 = vec_mul(x10_fp64vec2, c_fp64vec2); - y11_fp64vec2 = vec_mul(x11_fp64vec2, c_fp64vec2); - - - vec_xst(y0_fp64vec2, 0, y+(i )); - vec_xst(y1_fp64vec2, 0, y+(i+2 )); - vec_xst(y2_fp64vec2, 0, y+(i+4 )); - vec_xst(y3_fp64vec2, 0, y+(i+6 )); - vec_xst(y4_fp64vec2, 0, y+(i+8 )); - vec_xst(y5_fp64vec2, 0, y+(i+10)); - vec_xst(y6_fp64vec2, 0, y+(i+12)); - vec_xst(y7_fp64vec2, 0, y+(i+14)); - vec_xst(y8_fp64vec2, 0, y+(i+16)); - vec_xst(y9_fp64vec2, 0, y+(i+18)); - vec_xst(y10_fp64vec2, 0, y+(i+20)); - vec_xst(y11_fp64vec2, 0, y+(i+22)); - } - for (; i <= n-8; i += 8) - { - x0_fp64vec2 = vec_xl(0, x+(i )); - x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - - y0_fp64vec2 = vec_mul(x0_fp64vec2, c_fp64vec2); - y1_fp64vec2 = vec_mul(x1_fp64vec2, c_fp64vec2); - y2_fp64vec2 = vec_mul(x2_fp64vec2, c_fp64vec2); - y3_fp64vec2 = vec_mul(x3_fp64vec2, c_fp64vec2); - - vec_xst(y0_fp64vec2, 0, y+(i )); - vec_xst(y1_fp64vec2, 0, y+(i+2 )); - vec_xst(y2_fp64vec2, 0, y+(i+4 )); - vec_xst(y3_fp64vec2, 0, y+(i+6 )); - } - for (; i <= n-2; i += 2) - { - x0_fp64vec2 = vec_xl(0, x+(i )); - y0_fp64vec2 = vec_mul(x0_fp64vec2, c_fp64vec2); - vec_xst(y0_fp64vec2, 0, y+(i )); - } - for (; i < n; i++) - y[i] = c * x[i]; -} - - 
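[Editorial note, not part of the removed file] Every kernel in this deleted VSX.c follows the same template: splat the scalar constant into a 2-lane (double) or 4-lane (float) vector register, run a deeply unrolled main loop (24 doubles or 48 floats per pass), drop to a narrower unroll, and finish the last few elements in plain scalar code. Below is a minimal sketch of that template under the same assumptions (GCC or Clang targeting POWER8 with -mvsx). The function name is hypothetical, and vec_splats is used as a shorthand for the original's trick of loading c from a 2-element stack array with vec_xl:

    #include <altivec.h>
    #include <stddef.h>

    /* Sketch of the shared kernel shape, shown for y[i] = x[i] + c.
       Illustrative only; not part of the removed torch7 sources. */
    static void sketch_adds_vsx(double *y, const double *x,
                                const double c, const ptrdiff_t n)
    {
        ptrdiff_t i;
        const vector double cv = vec_splats(c);   /* c in both lanes */

        /* unrolled main loop: four 2-lane vectors = 8 doubles per pass */
        for (i = 0; i <= n - 8; i += 8) {
            vec_xst(vec_add(vec_xl(0, x + i    ), cv), 0, y + i    );
            vec_xst(vec_add(vec_xl(0, x + i + 2), cv), 0, y + i + 2);
            vec_xst(vec_add(vec_xl(0, x + i + 4), cv), 0, y + i + 4);
            vec_xst(vec_add(vec_xl(0, x + i + 6), cv), 0, y + i + 6);
        }
        for (; i < n; i++)                        /* scalar tail */
            y[i] = x[i] + c;
    }

The very wide unrolls in the removed code exist only to keep POWER's load/store pipes busy; correctness never depends on them, since the scalar tail handles whatever remainder the vector loops leave behind.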
-//-------------------------------------------------------------------------------------------------- -// THDoubleVector_cdiv_VSX: -//-------------------------------------------------------------------------------------------------- -static void THDoubleVector_cdiv_VSX(double *z, const double *x, const double *y, const ptrdiff_t n) -{ - ptrdiff_t i; - - vector double y0_fp64vec2, y1_fp64vec2, y2_fp64vec2, y3_fp64vec2, y4_fp64vec2, y5_fp64vec2, y6_fp64vec2, y7_fp64vec2; - vector double y8_fp64vec2, y9_fp64vec2, y10_fp64vec2, y11_fp64vec2; - vector double x0_fp64vec2, x1_fp64vec2, x2_fp64vec2, x3_fp64vec2, x4_fp64vec2, x5_fp64vec2, x6_fp64vec2, x7_fp64vec2; - vector double x8_fp64vec2, x9_fp64vec2, x10_fp64vec2, x11_fp64vec2; - - - for (i = 0; i <= n-24; i += 24) - { - y0_fp64vec2 = vec_xl(0, y+(i )); - y1_fp64vec2 = vec_xl(0, y+(i+2 )); - y2_fp64vec2 = vec_xl(0, y+(i+4 )); - y3_fp64vec2 = vec_xl(0, y+(i+6 )); - y4_fp64vec2 = vec_xl(0, y+(i+8 )); - y5_fp64vec2 = vec_xl(0, y+(i+10)); - y6_fp64vec2 = vec_xl(0, y+(i+12)); - y7_fp64vec2 = vec_xl(0, y+(i+14)); - y8_fp64vec2 = vec_xl(0, y+(i+16)); - y9_fp64vec2 = vec_xl(0, y+(i+18)); - y10_fp64vec2 = vec_xl(0, y+(i+20)); - y11_fp64vec2 = vec_xl(0, y+(i+22)); - - x0_fp64vec2 = vec_xl(0, x+(i )); - x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - x4_fp64vec2 = vec_xl(0, x+(i+8 )); - x5_fp64vec2 = vec_xl(0, x+(i+10)); - x6_fp64vec2 = vec_xl(0, x+(i+12)); - x7_fp64vec2 = vec_xl(0, x+(i+14)); - x8_fp64vec2 = vec_xl(0, x+(i+16)); - x9_fp64vec2 = vec_xl(0, x+(i+18)); - x10_fp64vec2 = vec_xl(0, x+(i+20)); - x11_fp64vec2 = vec_xl(0, x+(i+22)); - - y0_fp64vec2 = vec_div(x0_fp64vec2, y0_fp64vec2); - y1_fp64vec2 = vec_div(x1_fp64vec2, y1_fp64vec2); - y2_fp64vec2 = vec_div(x2_fp64vec2, y2_fp64vec2); - y3_fp64vec2 = vec_div(x3_fp64vec2, y3_fp64vec2); - y4_fp64vec2 = vec_div(x4_fp64vec2, y4_fp64vec2); - y5_fp64vec2 = vec_div(x5_fp64vec2, y5_fp64vec2); - y6_fp64vec2 = vec_div(x6_fp64vec2, y6_fp64vec2); - y7_fp64vec2 = vec_div(x7_fp64vec2, y7_fp64vec2); - y8_fp64vec2 = vec_div(x8_fp64vec2, y8_fp64vec2); - y9_fp64vec2 = vec_div(x9_fp64vec2, y9_fp64vec2); - y10_fp64vec2 = vec_div(x10_fp64vec2, y10_fp64vec2); - y11_fp64vec2 = vec_div(x11_fp64vec2, y11_fp64vec2); - - vec_xst(y0_fp64vec2, 0, z+(i )); - vec_xst(y1_fp64vec2, 0, z+(i+2 )); - vec_xst(y2_fp64vec2, 0, z+(i+4 )); - vec_xst(y3_fp64vec2, 0, z+(i+6 )); - vec_xst(y4_fp64vec2, 0, z+(i+8 )); - vec_xst(y5_fp64vec2, 0, z+(i+10)); - vec_xst(y6_fp64vec2, 0, z+(i+12)); - vec_xst(y7_fp64vec2, 0, z+(i+14)); - vec_xst(y8_fp64vec2, 0, z+(i+16)); - vec_xst(y9_fp64vec2, 0, z+(i+18)); - vec_xst(y10_fp64vec2, 0, z+(i+20)); - vec_xst(y11_fp64vec2, 0, z+(i+22)); - } - for (; i <= n-8; i += 8) - { - y0_fp64vec2 = vec_xl(0, y+(i )); - y1_fp64vec2 = vec_xl(0, y+(i+2 )); - y2_fp64vec2 = vec_xl(0, y+(i+4 )); - y3_fp64vec2 = vec_xl(0, y+(i+6 )); - - x0_fp64vec2 = vec_xl(0, x+(i )); - x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - - y0_fp64vec2 = vec_div(x0_fp64vec2, y0_fp64vec2); - y1_fp64vec2 = vec_div(x1_fp64vec2, y1_fp64vec2); - y2_fp64vec2 = vec_div(x2_fp64vec2, y2_fp64vec2); - y3_fp64vec2 = vec_div(x3_fp64vec2, y3_fp64vec2); - - vec_xst(y0_fp64vec2, 0, z+(i )); - vec_xst(y1_fp64vec2, 0, z+(i+2 )); - vec_xst(y2_fp64vec2, 0, z+(i+4 )); - vec_xst(y3_fp64vec2, 0, z+(i+6 )); - } - for (; i <= n-2; i += 2) - { - y0_fp64vec2 = vec_xl(0, y+(i )); - x0_fp64vec2 = vec_xl(0, x+(i )); - y0_fp64vec2 = 
vec_div(x0_fp64vec2, y0_fp64vec2); - vec_xst(y0_fp64vec2, 0, z+(i )); - } - for (; i < n; i++) - z[i] = x[i] / y[i]; -} - - -//-------------------------------------------------------------------------------------------------- -// THDoubleVector_divs_VSX: -//-------------------------------------------------------------------------------------------------- -static void THDoubleVector_divs_VSX(double *y, const double *x, const double c, const ptrdiff_t n) -{ - ptrdiff_t i; - - double val[2] = {c, c}; - vector double c_fp64vec2 = vec_xl(0, val); - - vector double y0_fp64vec2, y1_fp64vec2, y2_fp64vec2, y3_fp64vec2, y4_fp64vec2, y5_fp64vec2, y6_fp64vec2, y7_fp64vec2; - vector double y8_fp64vec2, y9_fp64vec2, y10_fp64vec2, y11_fp64vec2; - vector double x0_fp64vec2, x1_fp64vec2, x2_fp64vec2, x3_fp64vec2, x4_fp64vec2, x5_fp64vec2, x6_fp64vec2, x7_fp64vec2; - vector double x8_fp64vec2, x9_fp64vec2, x10_fp64vec2, x11_fp64vec2; - - - for (i = 0; i <= n-24; i += 24) - { - x0_fp64vec2 = vec_xl(0, x+(i )); - x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - x4_fp64vec2 = vec_xl(0, x+(i+8 )); - x5_fp64vec2 = vec_xl(0, x+(i+10)); - x6_fp64vec2 = vec_xl(0, x+(i+12)); - x7_fp64vec2 = vec_xl(0, x+(i+14)); - x8_fp64vec2 = vec_xl(0, x+(i+16)); - x9_fp64vec2 = vec_xl(0, x+(i+18)); - x10_fp64vec2 = vec_xl(0, x+(i+20)); - x11_fp64vec2 = vec_xl(0, x+(i+22)); - - y0_fp64vec2 = vec_div(x0_fp64vec2, c_fp64vec2); - y1_fp64vec2 = vec_div(x1_fp64vec2, c_fp64vec2); - y2_fp64vec2 = vec_div(x2_fp64vec2, c_fp64vec2); - y3_fp64vec2 = vec_div(x3_fp64vec2, c_fp64vec2); - y4_fp64vec2 = vec_div(x4_fp64vec2, c_fp64vec2); - y5_fp64vec2 = vec_div(x5_fp64vec2, c_fp64vec2); - y6_fp64vec2 = vec_div(x6_fp64vec2, c_fp64vec2); - y7_fp64vec2 = vec_div(x7_fp64vec2, c_fp64vec2); - y8_fp64vec2 = vec_div(x8_fp64vec2, c_fp64vec2); - y9_fp64vec2 = vec_div(x9_fp64vec2, c_fp64vec2); - y10_fp64vec2 = vec_div(x10_fp64vec2, c_fp64vec2); - y11_fp64vec2 = vec_div(x11_fp64vec2, c_fp64vec2); - - - vec_xst(y0_fp64vec2, 0, y+(i )); - vec_xst(y1_fp64vec2, 0, y+(i+2 )); - vec_xst(y2_fp64vec2, 0, y+(i+4 )); - vec_xst(y3_fp64vec2, 0, y+(i+6 )); - vec_xst(y4_fp64vec2, 0, y+(i+8 )); - vec_xst(y5_fp64vec2, 0, y+(i+10)); - vec_xst(y6_fp64vec2, 0, y+(i+12)); - vec_xst(y7_fp64vec2, 0, y+(i+14)); - vec_xst(y8_fp64vec2, 0, y+(i+16)); - vec_xst(y9_fp64vec2, 0, y+(i+18)); - vec_xst(y10_fp64vec2, 0, y+(i+20)); - vec_xst(y11_fp64vec2, 0, y+(i+22)); - } - for (; i <= n-8; i += 8) - { - x0_fp64vec2 = vec_xl(0, x+(i )); - x1_fp64vec2 = vec_xl(0, x+(i+2 )); - x2_fp64vec2 = vec_xl(0, x+(i+4 )); - x3_fp64vec2 = vec_xl(0, x+(i+6 )); - - y0_fp64vec2 = vec_div(x0_fp64vec2, c_fp64vec2); - y1_fp64vec2 = vec_div(x1_fp64vec2, c_fp64vec2); - y2_fp64vec2 = vec_div(x2_fp64vec2, c_fp64vec2); - y3_fp64vec2 = vec_div(x3_fp64vec2, c_fp64vec2); - - vec_xst(y0_fp64vec2, 0, y+(i )); - vec_xst(y1_fp64vec2, 0, y+(i+2 )); - vec_xst(y2_fp64vec2, 0, y+(i+4 )); - vec_xst(y3_fp64vec2, 0, y+(i+6 )); - - vec_xst(y0_fp64vec2, 0, y+(i )); - vec_xst(y1_fp64vec2, 0, y+(i+2 )); - vec_xst(y2_fp64vec2, 0, y+(i+4 )); - vec_xst(y3_fp64vec2, 0, y+(i+6 )); - } - for (; i <= n-2; i += 2) - { - x0_fp64vec2 = vec_xl(0, x+(i )); - y0_fp64vec2 = vec_div(x0_fp64vec2, c_fp64vec2); - vec_xst(y0_fp64vec2, 0, y+(i )); - } - for (; i < n; i++) - y[i] = x[i] / c; -} - - -//-------------------------------------------------------------------------------------------------- -// THFloatVector_fill_VSX: 
-//-------------------------------------------------------------------------------------------------- -static void THFloatVector_fill_VSX(float *x, const float c, const ptrdiff_t n) -{ - ptrdiff_t i; - - float val[4] = {c, c, c, c}; - vector float fp32vec4 = vec_xl(0, val); - - for (i = 0; i <= n-256; i += 256) - { - vec_xst(fp32vec4, 0, x+(i )); - vec_xst(fp32vec4, 0, x+(i+4 )); - vec_xst(fp32vec4, 0, x+(i+8 )); - vec_xst(fp32vec4, 0, x+(i+12 )); - vec_xst(fp32vec4, 0, x+(i+16 )); - vec_xst(fp32vec4, 0, x+(i+20 )); - vec_xst(fp32vec4, 0, x+(i+24 )); - vec_xst(fp32vec4, 0, x+(i+28 )); - vec_xst(fp32vec4, 0, x+(i+32 )); - vec_xst(fp32vec4, 0, x+(i+36 )); - vec_xst(fp32vec4, 0, x+(i+40 )); - vec_xst(fp32vec4, 0, x+(i+44 )); - vec_xst(fp32vec4, 0, x+(i+48 )); - vec_xst(fp32vec4, 0, x+(i+52 )); - vec_xst(fp32vec4, 0, x+(i+56 )); - vec_xst(fp32vec4, 0, x+(i+60 )); - vec_xst(fp32vec4, 0, x+(i+64 )); - vec_xst(fp32vec4, 0, x+(i+68 )); - vec_xst(fp32vec4, 0, x+(i+72 )); - vec_xst(fp32vec4, 0, x+(i+76 )); - vec_xst(fp32vec4, 0, x+(i+80 )); - vec_xst(fp32vec4, 0, x+(i+84 )); - vec_xst(fp32vec4, 0, x+(i+88 )); - vec_xst(fp32vec4, 0, x+(i+92 )); - vec_xst(fp32vec4, 0, x+(i+96 )); - vec_xst(fp32vec4, 0, x+(i+100)); - vec_xst(fp32vec4, 0, x+(i+104)); - vec_xst(fp32vec4, 0, x+(i+108)); - vec_xst(fp32vec4, 0, x+(i+112)); - vec_xst(fp32vec4, 0, x+(i+116)); - vec_xst(fp32vec4, 0, x+(i+120)); - vec_xst(fp32vec4, 0, x+(i+124)); - vec_xst(fp32vec4, 0, x+(i+128)); - vec_xst(fp32vec4, 0, x+(i+132)); - vec_xst(fp32vec4, 0, x+(i+136)); - vec_xst(fp32vec4, 0, x+(i+140)); - vec_xst(fp32vec4, 0, x+(i+144)); - vec_xst(fp32vec4, 0, x+(i+148)); - vec_xst(fp32vec4, 0, x+(i+152)); - vec_xst(fp32vec4, 0, x+(i+156)); - vec_xst(fp32vec4, 0, x+(i+160)); - vec_xst(fp32vec4, 0, x+(i+164)); - vec_xst(fp32vec4, 0, x+(i+168)); - vec_xst(fp32vec4, 0, x+(i+172)); - vec_xst(fp32vec4, 0, x+(i+176)); - vec_xst(fp32vec4, 0, x+(i+180)); - vec_xst(fp32vec4, 0, x+(i+184)); - vec_xst(fp32vec4, 0, x+(i+188)); - vec_xst(fp32vec4, 0, x+(i+192)); - vec_xst(fp32vec4, 0, x+(i+196)); - vec_xst(fp32vec4, 0, x+(i+200)); - vec_xst(fp32vec4, 0, x+(i+204)); - vec_xst(fp32vec4, 0, x+(i+208)); - vec_xst(fp32vec4, 0, x+(i+212)); - vec_xst(fp32vec4, 0, x+(i+216)); - vec_xst(fp32vec4, 0, x+(i+220)); - vec_xst(fp32vec4, 0, x+(i+224)); - vec_xst(fp32vec4, 0, x+(i+228)); - vec_xst(fp32vec4, 0, x+(i+232)); - vec_xst(fp32vec4, 0, x+(i+236)); - vec_xst(fp32vec4, 0, x+(i+240)); - vec_xst(fp32vec4, 0, x+(i+244)); - vec_xst(fp32vec4, 0, x+(i+248)); - vec_xst(fp32vec4, 0, x+(i+252)); - } - for (; i <= n-32; i += 32) - { - vec_xst(fp32vec4, 0, x+(i )); - vec_xst(fp32vec4, 0, x+(i+4 )); - vec_xst(fp32vec4, 0, x+(i+8 )); - vec_xst(fp32vec4, 0, x+(i+12 )); - vec_xst(fp32vec4, 0, x+(i+16 )); - vec_xst(fp32vec4, 0, x+(i+20 )); - vec_xst(fp32vec4, 0, x+(i+24 )); - vec_xst(fp32vec4, 0, x+(i+28 )); - } - for (; i <= n-4; i += 4) - vec_xst(fp32vec4, 0, x+(i )); - for (; i < n; i++) - x[i] = c; -} - - -//-------------------------------------------------------------------------------------------------- -// THFloatVector_cadd_VSX: -//-------------------------------------------------------------------------------------------------- -static void THFloatVector_cadd_VSX(float *z, const float *x, const float *y, const float c, const ptrdiff_t n) -{ - ptrdiff_t i; - - float val[4] = {c, c, c, c}; - vector float c_fp32vec4 = vec_xl(0, val); - - vector float y0_fp32vec4, y1_fp32vec4, y2_fp32vec4, y3_fp32vec4, y4_fp32vec4, y5_fp32vec4, y6_fp32vec4, y7_fp32vec4; - vector float y8_fp32vec4, 
y9_fp32vec4, y10_fp32vec4, y11_fp32vec4; - vector float x0_fp32vec4, x1_fp32vec4, x2_fp32vec4, x3_fp32vec4, x4_fp32vec4, x5_fp32vec4, x6_fp32vec4, x7_fp32vec4; - vector float x8_fp32vec4, x9_fp32vec4, x10_fp32vec4, x11_fp32vec4; - - - for (i = 0; i <= n-48; i += 48) - { - y0_fp32vec4 = vec_xl(0, y+(i )); - y1_fp32vec4 = vec_xl(0, y+(i+4 )); - y2_fp32vec4 = vec_xl(0, y+(i+8 )); - y3_fp32vec4 = vec_xl(0, y+(i+12)); - y4_fp32vec4 = vec_xl(0, y+(i+16 )); - y5_fp32vec4 = vec_xl(0, y+(i+20)); - y6_fp32vec4 = vec_xl(0, y+(i+24)); - y7_fp32vec4 = vec_xl(0, y+(i+28)); - y8_fp32vec4 = vec_xl(0, y+(i+32)); - y9_fp32vec4 = vec_xl(0, y+(i+36)); - y10_fp32vec4 = vec_xl(0, y+(i+40)); - y11_fp32vec4 = vec_xl(0, y+(i+44)); - - x0_fp32vec4 = vec_xl(0, x+(i )); - x1_fp32vec4 = vec_xl(0, x+(i+4 )); - x2_fp32vec4 = vec_xl(0, x+(i+8 )); - x3_fp32vec4 = vec_xl(0, x+(i+12 )); - x4_fp32vec4 = vec_xl(0, x+(i+16 )); - x5_fp32vec4 = vec_xl(0, x+(i+20)); - x6_fp32vec4 = vec_xl(0, x+(i+24)); - x7_fp32vec4 = vec_xl(0, x+(i+28)); - x8_fp32vec4 = vec_xl(0, x+(i+32)); - x9_fp32vec4 = vec_xl(0, x+(i+36)); - x10_fp32vec4 = vec_xl(0, x+(i+40)); - x11_fp32vec4 = vec_xl(0, x+(i+44)); - - y0_fp32vec4 = vec_madd(y0_fp32vec4, c_fp32vec4, x0_fp32vec4); - y1_fp32vec4 = vec_madd(y1_fp32vec4, c_fp32vec4, x1_fp32vec4); - y2_fp32vec4 = vec_madd(y2_fp32vec4, c_fp32vec4, x2_fp32vec4); - y3_fp32vec4 = vec_madd(y3_fp32vec4, c_fp32vec4, x3_fp32vec4); - y4_fp32vec4 = vec_madd(y4_fp32vec4, c_fp32vec4, x4_fp32vec4); - y5_fp32vec4 = vec_madd(y5_fp32vec4, c_fp32vec4, x5_fp32vec4); - y6_fp32vec4 = vec_madd(y6_fp32vec4, c_fp32vec4, x6_fp32vec4); - y7_fp32vec4 = vec_madd(y7_fp32vec4, c_fp32vec4, x7_fp32vec4); - y8_fp32vec4 = vec_madd(y8_fp32vec4, c_fp32vec4, x8_fp32vec4); - y9_fp32vec4 = vec_madd(y9_fp32vec4, c_fp32vec4, x9_fp32vec4); - y10_fp32vec4 = vec_madd(y10_fp32vec4, c_fp32vec4, x10_fp32vec4); - y11_fp32vec4 = vec_madd(y11_fp32vec4, c_fp32vec4, x11_fp32vec4); - - vec_xst(y0_fp32vec4, 0, z+(i )); - vec_xst(y1_fp32vec4, 0, z+(i+4 )); - vec_xst(y2_fp32vec4, 0, z+(i+8 )); - vec_xst(y3_fp32vec4, 0, z+(i+12 )); - vec_xst(y4_fp32vec4, 0, z+(i+16 )); - vec_xst(y5_fp32vec4, 0, z+(i+20)); - vec_xst(y6_fp32vec4, 0, z+(i+24)); - vec_xst(y7_fp32vec4, 0, z+(i+28)); - vec_xst(y8_fp32vec4, 0, z+(i+32)); - vec_xst(y9_fp32vec4, 0, z+(i+36)); - vec_xst(y10_fp32vec4, 0, z+(i+40)); - vec_xst(y11_fp32vec4, 0, z+(i+44)); - } - for (; i <= n-16; i += 16) - { - y0_fp32vec4 = vec_xl(0, y+(i )); - y1_fp32vec4 = vec_xl(0, y+(i+4 )); - y2_fp32vec4 = vec_xl(0, y+(i+8 )); - y3_fp32vec4 = vec_xl(0, y+(i+12 )); - - x0_fp32vec4 = vec_xl(0, x+(i )); - x1_fp32vec4 = vec_xl(0, x+(i+4 )); - x2_fp32vec4 = vec_xl(0, x+(i+8 )); - x3_fp32vec4 = vec_xl(0, x+(i+12 )); - - y0_fp32vec4 = vec_madd(y0_fp32vec4, c_fp32vec4, x0_fp32vec4); - y1_fp32vec4 = vec_madd(y1_fp32vec4, c_fp32vec4, x1_fp32vec4); - y2_fp32vec4 = vec_madd(y2_fp32vec4, c_fp32vec4, x2_fp32vec4); - y3_fp32vec4 = vec_madd(y3_fp32vec4, c_fp32vec4, x3_fp32vec4); - - vec_xst(y0_fp32vec4, 0, z+(i )); - vec_xst(y1_fp32vec4, 0, z+(i+4 )); - vec_xst(y2_fp32vec4, 0, z+(i+8 )); - vec_xst(y3_fp32vec4, 0, z+(i+12 )); - } - for (; i <= n-4; i += 4) - { - y0_fp32vec4 = vec_xl(0, y+(i )); - x0_fp32vec4 = vec_xl(0, x+(i )); - y0_fp32vec4 = vec_madd(y0_fp32vec4, c_fp32vec4, x0_fp32vec4); - vec_xst(y0_fp32vec4, 0, z+(i )); - } - for (; i < n; i++) - z[i] = x[i] + c* y[i]; -} - - -//-------------------------------------------------------------------------------------------------- -// THFloatVector_adds_VSX: 
-//-------------------------------------------------------------------------------------------------- -static void THFloatVector_adds_VSX(float *y, const float *x, const float c, const ptrdiff_t n) -{ - ptrdiff_t i; - float val[4] = {c, c, c, c}; - vector float c_fp32vec4 = vec_xl(0, val); - - vector float y0_fp32vec4, y1_fp32vec4, y2_fp32vec4, y3_fp32vec4, y4_fp32vec4, y5_fp32vec4, y6_fp32vec4, y7_fp32vec4; - vector float y8_fp32vec4, y9_fp32vec4, y10_fp32vec4, y11_fp32vec4; - vector float x0_fp32vec4, x1_fp32vec4, x2_fp32vec4, x3_fp32vec4, x4_fp32vec4, x5_fp32vec4, x6_fp32vec4, x7_fp32vec4; - vector float x8_fp32vec4, x9_fp32vec4, x10_fp32vec4, x11_fp32vec4; - - - for (i = 0; i <= n-48; i += 48) - { - x0_fp32vec4 = vec_xl(0, x+(i )); - x1_fp32vec4 = vec_xl(0, x+(i+4 )); - x2_fp32vec4 = vec_xl(0, x+(i+8 )); - x3_fp32vec4 = vec_xl(0, x+(i+12)); - x4_fp32vec4 = vec_xl(0, x+(i+16)); - x5_fp32vec4 = vec_xl(0, x+(i+20)); - x6_fp32vec4 = vec_xl(0, x+(i+24)); - x7_fp32vec4 = vec_xl(0, x+(i+28)); - x8_fp32vec4 = vec_xl(0, x+(i+32)); - x9_fp32vec4 = vec_xl(0, x+(i+36)); - x10_fp32vec4 = vec_xl(0, x+(i+40)); - x11_fp32vec4 = vec_xl(0, x+(i+44)); - - y0_fp32vec4 = vec_add(x0_fp32vec4, c_fp32vec4); - y1_fp32vec4 = vec_add(x1_fp32vec4, c_fp32vec4); - y2_fp32vec4 = vec_add(x2_fp32vec4, c_fp32vec4); - y3_fp32vec4 = vec_add(x3_fp32vec4, c_fp32vec4); - y4_fp32vec4 = vec_add(x4_fp32vec4, c_fp32vec4); - y5_fp32vec4 = vec_add(x5_fp32vec4, c_fp32vec4); - y6_fp32vec4 = vec_add(x6_fp32vec4, c_fp32vec4); - y7_fp32vec4 = vec_add(x7_fp32vec4, c_fp32vec4); - y8_fp32vec4 = vec_add(x8_fp32vec4, c_fp32vec4); - y9_fp32vec4 = vec_add(x9_fp32vec4, c_fp32vec4); - y10_fp32vec4 = vec_add(x10_fp32vec4, c_fp32vec4); - y11_fp32vec4 = vec_add(x11_fp32vec4, c_fp32vec4); - - vec_xst(y0_fp32vec4, 0, y+(i )); - vec_xst(y1_fp32vec4, 0, y+(i+4 )); - vec_xst(y2_fp32vec4, 0, y+(i+8 )); - vec_xst(y3_fp32vec4, 0, y+(i+12)); - vec_xst(y4_fp32vec4, 0, y+(i+16)); - vec_xst(y5_fp32vec4, 0, y+(i+20)); - vec_xst(y6_fp32vec4, 0, y+(i+24)); - vec_xst(y7_fp32vec4, 0, y+(i+28)); - vec_xst(y8_fp32vec4, 0, y+(i+32)); - vec_xst(y9_fp32vec4, 0, y+(i+36)); - vec_xst(y10_fp32vec4, 0, y+(i+40)); - vec_xst(y11_fp32vec4, 0, y+(i+44)); - } - for (; i <= n-16; i += 16) - { - x0_fp32vec4 = vec_xl(0, x+(i )); - x1_fp32vec4 = vec_xl(0, x+(i+4 )); - x2_fp32vec4 = vec_xl(0, x+(i+8 )); - x3_fp32vec4 = vec_xl(0, x+(i+12)); - - y0_fp32vec4 = vec_add(x0_fp32vec4, c_fp32vec4); - y1_fp32vec4 = vec_add(x1_fp32vec4, c_fp32vec4); - y2_fp32vec4 = vec_add(x2_fp32vec4, c_fp32vec4); - y3_fp32vec4 = vec_add(x3_fp32vec4, c_fp32vec4); - - vec_xst(y0_fp32vec4, 0, y+(i )); - vec_xst(y1_fp32vec4, 0, y+(i+4 )); - vec_xst(y2_fp32vec4, 0, y+(i+8 )); - vec_xst(y3_fp32vec4, 0, y+(i+12)); - } - for (; i <= n-4; i += 4) - { - x0_fp32vec4 = vec_xl(0, x+(i )); - y0_fp32vec4 = vec_add(x0_fp32vec4, c_fp32vec4); - vec_xst(y0_fp32vec4, 0, y+(i )); - } - for (; i < n; i++) - y[i] = c + x[i]; -} - - -//-------------------------------------------------------------------------------------------------- -// THFloatVector_cmul_VSX: -//-------------------------------------------------------------------------------------------------- -static void THFloatVector_cmul_VSX(float *z, const float *y, const float *x, const ptrdiff_t n) -{ - ptrdiff_t i; - - vector float y0_fp32vec4, y1_fp32vec4, y2_fp32vec4, y3_fp32vec4, y4_fp32vec4, y5_fp32vec4, y6_fp32vec4, y7_fp32vec4; - vector float y8_fp32vec4, y9_fp32vec4, y10_fp32vec4, y11_fp32vec4; - vector float x0_fp32vec4, x1_fp32vec4, x2_fp32vec4, x3_fp32vec4, 
x4_fp32vec4, x5_fp32vec4, x6_fp32vec4, x7_fp32vec4; - vector float x8_fp32vec4, x9_fp32vec4, x10_fp32vec4, x11_fp32vec4; - - - for (i = 0; i <= n-48; i += 48) - { - y0_fp32vec4 = vec_xl(0, y+(i )); - y1_fp32vec4 = vec_xl(0, y+(i+4 )); - y2_fp32vec4 = vec_xl(0, y+(i+8 )); - y3_fp32vec4 = vec_xl(0, y+(i+12 )); - y4_fp32vec4 = vec_xl(0, y+(i+16 )); - y5_fp32vec4 = vec_xl(0, y+(i+20)); - y6_fp32vec4 = vec_xl(0, y+(i+24)); - y7_fp32vec4 = vec_xl(0, y+(i+28)); - y8_fp32vec4 = vec_xl(0, y+(i+32)); - y9_fp32vec4 = vec_xl(0, y+(i+36)); - y10_fp32vec4 = vec_xl(0, y+(i+40)); - y11_fp32vec4 = vec_xl(0, y+(i+44)); - - x0_fp32vec4 = vec_xl(0, x+(i )); - x1_fp32vec4 = vec_xl(0, x+(i+4 )); - x2_fp32vec4 = vec_xl(0, x+(i+8 )); - x3_fp32vec4 = vec_xl(0, x+(i+12 )); - x4_fp32vec4 = vec_xl(0, x+(i+16 )); - x5_fp32vec4 = vec_xl(0, x+(i+20)); - x6_fp32vec4 = vec_xl(0, x+(i+24)); - x7_fp32vec4 = vec_xl(0, x+(i+28)); - x8_fp32vec4 = vec_xl(0, x+(i+32)); - x9_fp32vec4 = vec_xl(0, x+(i+36)); - x10_fp32vec4 = vec_xl(0, x+(i+40)); - x11_fp32vec4 = vec_xl(0, x+(i+44)); - - y0_fp32vec4 = vec_mul(y0_fp32vec4, x0_fp32vec4); - y1_fp32vec4 = vec_mul(y1_fp32vec4, x1_fp32vec4); - y2_fp32vec4 = vec_mul(y2_fp32vec4, x2_fp32vec4); - y3_fp32vec4 = vec_mul(y3_fp32vec4, x3_fp32vec4); - y4_fp32vec4 = vec_mul(y4_fp32vec4, x4_fp32vec4); - y5_fp32vec4 = vec_mul(y5_fp32vec4, x5_fp32vec4); - y6_fp32vec4 = vec_mul(y6_fp32vec4, x6_fp32vec4); - y7_fp32vec4 = vec_mul(y7_fp32vec4, x7_fp32vec4); - y8_fp32vec4 = vec_mul(y8_fp32vec4, x8_fp32vec4); - y9_fp32vec4 = vec_mul(y9_fp32vec4, x9_fp32vec4); - y10_fp32vec4 = vec_mul(y10_fp32vec4, x10_fp32vec4); - y11_fp32vec4 = vec_mul(y11_fp32vec4, x11_fp32vec4); - - vec_xst(y0_fp32vec4, 0, z+(i )); - vec_xst(y1_fp32vec4, 0, z+(i+4 )); - vec_xst(y2_fp32vec4, 0, z+(i+8 )); - vec_xst(y3_fp32vec4, 0, z+(i+12 )); - vec_xst(y4_fp32vec4, 0, z+(i+16 )); - vec_xst(y5_fp32vec4, 0, z+(i+20)); - vec_xst(y6_fp32vec4, 0, z+(i+24)); - vec_xst(y7_fp32vec4, 0, z+(i+28)); - vec_xst(y8_fp32vec4, 0, z+(i+32)); - vec_xst(y9_fp32vec4, 0, z+(i+36)); - vec_xst(y10_fp32vec4, 0, z+(i+40)); - vec_xst(y11_fp32vec4, 0, z+(i+44)); - } - for (; i <= n-16; i += 16) - { - y0_fp32vec4 = vec_xl(0, y+(i )); - y1_fp32vec4 = vec_xl(0, y+(i+4 )); - y2_fp32vec4 = vec_xl(0, y+(i+8 )); - y3_fp32vec4 = vec_xl(0, y+(i+12 )); - - x0_fp32vec4 = vec_xl(0, x+(i )); - x1_fp32vec4 = vec_xl(0, x+(i+4 )); - x2_fp32vec4 = vec_xl(0, x+(i+8 )); - x3_fp32vec4 = vec_xl(0, x+(i+12 )); - - y0_fp32vec4 = vec_mul(y0_fp32vec4, x0_fp32vec4); - y1_fp32vec4 = vec_mul(y1_fp32vec4, x1_fp32vec4); - y2_fp32vec4 = vec_mul(y2_fp32vec4, x2_fp32vec4); - y3_fp32vec4 = vec_mul(y3_fp32vec4, x3_fp32vec4); - - vec_xst(y0_fp32vec4, 0, z+(i )); - vec_xst(y1_fp32vec4, 0, z+(i+4 )); - vec_xst(y2_fp32vec4, 0, z+(i+8 )); - vec_xst(y3_fp32vec4, 0, z+(i+12 )); - } - for (; i <= n-4; i += 4) - { - y0_fp32vec4 = vec_xl(0, y+(i )); - x0_fp32vec4 = vec_xl(0, x+(i )); - y0_fp32vec4 = vec_mul(y0_fp32vec4, x0_fp32vec4); - vec_xst(y0_fp32vec4, 0, z+(i )); - } - for (; i < n; i++) - z[i] = y[i] * x[i]; -} - - -//-------------------------------------------------------------------------------------------------- -// THFloatVector_muls_VSX: -//-------------------------------------------------------------------------------------------------- -static void THFloatVector_muls_VSX(float *y, const float *x, const float c, const ptrdiff_t n) -{ - ptrdiff_t i; - float val[4] = {c, c, c, c}; - vector float c_fp32vec4 = vec_xl(0, val); - - vector float y0_fp32vec4, y1_fp32vec4, y2_fp32vec4, y3_fp32vec4, 
y4_fp32vec4, y5_fp32vec4, y6_fp32vec4, y7_fp32vec4; - vector float y8_fp32vec4, y9_fp32vec4, y10_fp32vec4, y11_fp32vec4; - vector float x0_fp32vec4, x1_fp32vec4, x2_fp32vec4, x3_fp32vec4, x4_fp32vec4, x5_fp32vec4, x6_fp32vec4, x7_fp32vec4; - vector float x8_fp32vec4, x9_fp32vec4, x10_fp32vec4, x11_fp32vec4; - - - for (i = 0; i <= n-48; i += 48) - { - x0_fp32vec4 = vec_xl(0, x+(i )); - x1_fp32vec4 = vec_xl(0, x+(i+4 )); - x2_fp32vec4 = vec_xl(0, x+(i+8 )); - x3_fp32vec4 = vec_xl(0, x+(i+12)); - x4_fp32vec4 = vec_xl(0, x+(i+16)); - x5_fp32vec4 = vec_xl(0, x+(i+20)); - x6_fp32vec4 = vec_xl(0, x+(i+24)); - x7_fp32vec4 = vec_xl(0, x+(i+28)); - x8_fp32vec4 = vec_xl(0, x+(i+32)); - x9_fp32vec4 = vec_xl(0, x+(i+36)); - x10_fp32vec4 = vec_xl(0, x+(i+40)); - x11_fp32vec4 = vec_xl(0, x+(i+44)); - - y0_fp32vec4 = vec_mul(x0_fp32vec4, c_fp32vec4); - y1_fp32vec4 = vec_mul(x1_fp32vec4, c_fp32vec4); - y2_fp32vec4 = vec_mul(x2_fp32vec4, c_fp32vec4); - y3_fp32vec4 = vec_mul(x3_fp32vec4, c_fp32vec4); - y4_fp32vec4 = vec_mul(x4_fp32vec4, c_fp32vec4); - y5_fp32vec4 = vec_mul(x5_fp32vec4, c_fp32vec4); - y6_fp32vec4 = vec_mul(x6_fp32vec4, c_fp32vec4); - y7_fp32vec4 = vec_mul(x7_fp32vec4, c_fp32vec4); - y8_fp32vec4 = vec_mul(x8_fp32vec4, c_fp32vec4); - y9_fp32vec4 = vec_mul(x9_fp32vec4, c_fp32vec4); - y10_fp32vec4 = vec_mul(x10_fp32vec4, c_fp32vec4); - y11_fp32vec4 = vec_mul(x11_fp32vec4, c_fp32vec4); - - vec_xst(y0_fp32vec4, 0, y+(i )); - vec_xst(y1_fp32vec4, 0, y+(i+4 )); - vec_xst(y2_fp32vec4, 0, y+(i+8 )); - vec_xst(y3_fp32vec4, 0, y+(i+12)); - vec_xst(y4_fp32vec4, 0, y+(i+16)); - vec_xst(y5_fp32vec4, 0, y+(i+20)); - vec_xst(y6_fp32vec4, 0, y+(i+24)); - vec_xst(y7_fp32vec4, 0, y+(i+28)); - vec_xst(y8_fp32vec4, 0, y+(i+32)); - vec_xst(y9_fp32vec4, 0, y+(i+36)); - vec_xst(y10_fp32vec4, 0, y+(i+40)); - vec_xst(y11_fp32vec4, 0, y+(i+44)); - } - for (; i <= n-16; i += 16) - { - x0_fp32vec4 = vec_xl(0, x+(i )); - x1_fp32vec4 = vec_xl(0, x+(i+4 )); - x2_fp32vec4 = vec_xl(0, x+(i+8 )); - x3_fp32vec4 = vec_xl(0, x+(i+12)); - - y0_fp32vec4 = vec_mul(x0_fp32vec4, c_fp32vec4); - y1_fp32vec4 = vec_mul(x1_fp32vec4, c_fp32vec4); - y2_fp32vec4 = vec_mul(x2_fp32vec4, c_fp32vec4); - y3_fp32vec4 = vec_mul(x3_fp32vec4, c_fp32vec4); - - vec_xst(y0_fp32vec4, 0, y+(i )); - vec_xst(y1_fp32vec4, 0, y+(i+4 )); - vec_xst(y2_fp32vec4, 0, y+(i+8 )); - vec_xst(y3_fp32vec4, 0, y+(i+12)); - } - for (; i <= n-4; i += 4) - { - x0_fp32vec4 = vec_xl(0, x+(i )); - y0_fp32vec4 = vec_mul(x0_fp32vec4, c_fp32vec4); - vec_xst(y0_fp32vec4, 0, y+(i )); - } - for (; i < n; i++) - y[i] = c * x[i]; -} - - -//-------------------------------------------------------------------------------------------------- -// THFloatVector_cdiv_VSX: -//-------------------------------------------------------------------------------------------------- -static void THFloatVector_cdiv_VSX(float *z, const float *x, const float *y, const ptrdiff_t n) -{ - ptrdiff_t i; - - vector float y0_fp32vec4, y1_fp32vec4, y2_fp32vec4, y3_fp32vec4, y4_fp32vec4, y5_fp32vec4, y6_fp32vec4, y7_fp32vec4; - vector float y8_fp32vec4, y9_fp32vec4, y10_fp32vec4, y11_fp32vec4; - vector float x0_fp32vec4, x1_fp32vec4, x2_fp32vec4, x3_fp32vec4, x4_fp32vec4, x5_fp32vec4, x6_fp32vec4, x7_fp32vec4; - vector float x8_fp32vec4, x9_fp32vec4, x10_fp32vec4, x11_fp32vec4; - - - for (i = 0; i <= n-48; i += 48) - { - y0_fp32vec4 = vec_xl(0, y+(i )); - y1_fp32vec4 = vec_xl(0, y+(i+4)); - y2_fp32vec4 = vec_xl(0, y+(i+8)); - y3_fp32vec4 = vec_xl(0, y+(i+12)); - y4_fp32vec4 = vec_xl(0, y+(i+16)); - y5_fp32vec4 = 
vec_xl(0, y+(i+20)); - y6_fp32vec4 = vec_xl(0, y+(i+24)); - y7_fp32vec4 = vec_xl(0, y+(i+28)); - y8_fp32vec4 = vec_xl(0, y+(i+32)); - y9_fp32vec4 = vec_xl(0, y+(i+36)); - y10_fp32vec4 = vec_xl(0, y+(i+40)); - y11_fp32vec4 = vec_xl(0, y+(i+44)); - - x0_fp32vec4 = vec_xl(0, x+(i )); - x1_fp32vec4 = vec_xl(0, x+(i+4 )); - x2_fp32vec4 = vec_xl(0, x+(i+8 )); - x3_fp32vec4 = vec_xl(0, x+(i+12 )); - x4_fp32vec4 = vec_xl(0, x+(i+16 )); - x5_fp32vec4 = vec_xl(0, x+(i+20)); - x6_fp32vec4 = vec_xl(0, x+(i+24)); - x7_fp32vec4 = vec_xl(0, x+(i+28)); - x8_fp32vec4 = vec_xl(0, x+(i+32)); - x9_fp32vec4 = vec_xl(0, x+(i+36)); - x10_fp32vec4 = vec_xl(0, x+(i+40)); - x11_fp32vec4 = vec_xl(0, x+(i+44)); - - y0_fp32vec4 = vec_div(x0_fp32vec4, y0_fp32vec4); - y1_fp32vec4 = vec_div(x1_fp32vec4, y1_fp32vec4); - y2_fp32vec4 = vec_div(x2_fp32vec4, y2_fp32vec4); - y3_fp32vec4 = vec_div(x3_fp32vec4, y3_fp32vec4); - y4_fp32vec4 = vec_div(x4_fp32vec4, y4_fp32vec4); - y5_fp32vec4 = vec_div(x5_fp32vec4, y5_fp32vec4); - y6_fp32vec4 = vec_div(x6_fp32vec4, y6_fp32vec4); - y7_fp32vec4 = vec_div(x7_fp32vec4, y7_fp32vec4); - y8_fp32vec4 = vec_div(x8_fp32vec4, y8_fp32vec4); - y9_fp32vec4 = vec_div(x9_fp32vec4, y9_fp32vec4); - y10_fp32vec4 = vec_div(x10_fp32vec4, y10_fp32vec4); - y11_fp32vec4 = vec_div(x11_fp32vec4, y11_fp32vec4); - - vec_xst(y0_fp32vec4, 0, z+(i )); - vec_xst(y1_fp32vec4, 0, z+(i+4 )); - vec_xst(y2_fp32vec4, 0, z+(i+8 )); - vec_xst(y3_fp32vec4, 0, z+(i+12 )); - vec_xst(y4_fp32vec4, 0, z+(i+16 )); - vec_xst(y5_fp32vec4, 0, z+(i+20)); - vec_xst(y6_fp32vec4, 0, z+(i+24)); - vec_xst(y7_fp32vec4, 0, z+(i+28)); - vec_xst(y8_fp32vec4, 0, z+(i+32)); - vec_xst(y9_fp32vec4, 0, z+(i+36)); - vec_xst(y10_fp32vec4, 0, z+(i+40)); - vec_xst(y11_fp32vec4, 0, z+(i+44)); - } - for (; i <= n-16; i += 16) - { - y0_fp32vec4 = vec_xl(0, y+(i )); - y1_fp32vec4 = vec_xl(0, y+(i+4 )); - y2_fp32vec4 = vec_xl(0, y+(i+8 )); - y3_fp32vec4 = vec_xl(0, y+(i+12 )); - - x0_fp32vec4 = vec_xl(0, x+(i )); - x1_fp32vec4 = vec_xl(0, x+(i+4 )); - x2_fp32vec4 = vec_xl(0, x+(i+8 )); - x3_fp32vec4 = vec_xl(0, x+(i+12 )); - - y0_fp32vec4 = vec_div(x0_fp32vec4, y0_fp32vec4); - y1_fp32vec4 = vec_div(x1_fp32vec4, y1_fp32vec4); - y2_fp32vec4 = vec_div(x2_fp32vec4, y2_fp32vec4); - y3_fp32vec4 = vec_div(x3_fp32vec4, y3_fp32vec4); - - vec_xst(y0_fp32vec4, 0, z+(i )); - vec_xst(y1_fp32vec4, 0, z+(i+4 )); - vec_xst(y2_fp32vec4, 0, z+(i+8 )); - vec_xst(y3_fp32vec4, 0, z+(i+12 )); - } - for (; i <= n-4; i += 4) - { - y0_fp32vec4 = vec_xl(0, y+(i )); - x0_fp32vec4 = vec_xl(0, x+(i )); - y0_fp32vec4 = vec_div(x0_fp32vec4, y0_fp32vec4); - vec_xst(y0_fp32vec4, 0, z+(i )); - } - for (; i < n; i++) - z[i] = x[i] / y[i]; -} - - -//-------------------------------------------------------------------------------------------------- -// THFloatVector_divs_VSX: -//-------------------------------------------------------------------------------------------------- -static void THFloatVector_divs_VSX(float *y, const float*x, const float c, const ptrdiff_t n) -{ - ptrdiff_t i; - - float val[4] = {c, c, c, c}; - vector float c_fp64vec2 = vec_xl(0, val); - - vector float y0_fp64vec2, y1_fp64vec2, y2_fp64vec2, y3_fp64vec2, y4_fp64vec2, y5_fp64vec2, y6_fp64vec2, y7_fp64vec2; - vector float y8_fp64vec2, y9_fp64vec2, y10_fp64vec2, y11_fp64vec2; - vector float x0_fp64vec2, x1_fp64vec2, x2_fp64vec2, x3_fp64vec2, x4_fp64vec2, x5_fp64vec2, x6_fp64vec2, x7_fp64vec2; - vector float x8_fp64vec2, x9_fp64vec2, x10_fp64vec2, x11_fp64vec2; - - - for (i = 0; i <= n-48; i += 48) - { - x0_fp64vec2 = 
vec_xl(0, x+(i    ));
-        x1_fp64vec2  = vec_xl(0, x+(i+4  ));
-        x2_fp64vec2  = vec_xl(0, x+(i+8  ));
-        x3_fp64vec2  = vec_xl(0, x+(i+12 ));
-        x4_fp64vec2  = vec_xl(0, x+(i+16 ));
-        x5_fp64vec2  = vec_xl(0, x+(i+20));
-        x6_fp64vec2  = vec_xl(0, x+(i+24));
-        x7_fp64vec2  = vec_xl(0, x+(i+28));
-        x8_fp64vec2  = vec_xl(0, x+(i+32));
-        x9_fp64vec2  = vec_xl(0, x+(i+36));
-        x10_fp64vec2 = vec_xl(0, x+(i+40));
-        x11_fp64vec2 = vec_xl(0, x+(i+44));
-
-        y0_fp64vec2  = vec_div(x0_fp64vec2,  c_fp64vec2);
-        y1_fp64vec2  = vec_div(x1_fp64vec2,  c_fp64vec2);
-        y2_fp64vec2  = vec_div(x2_fp64vec2,  c_fp64vec2);
-        y3_fp64vec2  = vec_div(x3_fp64vec2,  c_fp64vec2);
-        y4_fp64vec2  = vec_div(x4_fp64vec2,  c_fp64vec2);
-        y5_fp64vec2  = vec_div(x5_fp64vec2,  c_fp64vec2);
-        y6_fp64vec2  = vec_div(x6_fp64vec2,  c_fp64vec2);
-        y7_fp64vec2  = vec_div(x7_fp64vec2,  c_fp64vec2);
-        y8_fp64vec2  = vec_div(x8_fp64vec2,  c_fp64vec2);
-        y9_fp64vec2  = vec_div(x9_fp64vec2,  c_fp64vec2);
-        y10_fp64vec2 = vec_div(x10_fp64vec2, c_fp64vec2);
-        y11_fp64vec2 = vec_div(x11_fp64vec2, c_fp64vec2);
-
-        vec_xst(y0_fp64vec2,  0, y+(i    ));
-        vec_xst(y1_fp64vec2,  0, y+(i+4  ));
-        vec_xst(y2_fp64vec2,  0, y+(i+8  ));
-        vec_xst(y3_fp64vec2,  0, y+(i+12 ));
-        vec_xst(y4_fp64vec2,  0, y+(i+16 ));
-        vec_xst(y5_fp64vec2,  0, y+(i+20));
-        vec_xst(y6_fp64vec2,  0, y+(i+24));
-        vec_xst(y7_fp64vec2,  0, y+(i+28));
-        vec_xst(y8_fp64vec2,  0, y+(i+32));
-        vec_xst(y9_fp64vec2,  0, y+(i+36));
-        vec_xst(y10_fp64vec2, 0, y+(i+40));
-        vec_xst(y11_fp64vec2, 0, y+(i+44));
-    }
-    for (; i <= n-16; i += 16)
-    {
-        x0_fp64vec2 = vec_xl(0, x+(i   ));
-        x1_fp64vec2 = vec_xl(0, x+(i+4 ));
-        x2_fp64vec2 = vec_xl(0, x+(i+8 ));
-        x3_fp64vec2 = vec_xl(0, x+(i+12));
-
-        y0_fp64vec2 = vec_div(x0_fp64vec2, c_fp64vec2);
-        y1_fp64vec2 = vec_div(x1_fp64vec2, c_fp64vec2);
-        y2_fp64vec2 = vec_div(x2_fp64vec2, c_fp64vec2);
-        y3_fp64vec2 = vec_div(x3_fp64vec2, c_fp64vec2);
-
-        vec_xst(y0_fp64vec2, 0, y+(i   ));
-        vec_xst(y1_fp64vec2, 0, y+(i+4 ));
-        vec_xst(y2_fp64vec2, 0, y+(i+8 ));
-        vec_xst(y3_fp64vec2, 0, y+(i+12));
-    }
-    for (; i <= n-4; i += 4)
-    {
-        x0_fp64vec2 = vec_xl(0, x+(i));
-        y0_fp64vec2 = vec_div(x0_fp64vec2, c_fp64vec2);
-        vec_xst(y0_fp64vec2, 0, y+(i));
-    }
-    for (; i < n; i++)
-        y[i] = x[i] / c;
-}
-
-
-//------------------------------------------------
-//
-// Testing for correctness and performance
-//
-// If you want to run these tests, compile this
-// file with -DRUN_VSX_TESTS on a Power machine,
-// and then run the executable that is generated.
-//
-//------------------------------------------------
-//
-// Example passing run (from a Power8 machine):
-//
-// $ gcc VSX.c -O2 -D RUN_VSX_TESTS -o vsxtest
-// $ ./vsxtest
-//
-// TODO
-//
-//
-// Finished running all tests. All tests PASSED.
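-//
-// Note: with VSX_PERF_NUM_TEST_ELEMENTS set to 100000000 below, each
-// double-precision buffer is about 800 MB (1e8 * 8 bytes), and the
-// cadd/cmul/cdiv tests keep up to four such buffers alive at once, so
-// expect the performance runs to need several GB of free RAM.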
-//
-//------------------------------------------------
-#ifdef RUN_VSX_TESTS
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <time.h>
-#include <math.h>
-#include <assert.h>
-
-#define VSX_PERF_NUM_TEST_ELEMENTS 100000000
-#define VSX_FUNC_NUM_TEST_ELEMENTS 2507
-
-
-//--------------------------------------------------------------------------------------------------
-// Standard implementations:
-//--------------------------------------------------------------------------------------------------
-static void standardDouble_fill(double *x, const double c, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        x[i] = c;
-}
-
-static void standardFloat_fill(float *x, const float c, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        x[i] = c;
-}
-
-static void standardDouble_cadd(double *z, const double *x, const double *y, const double c, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        z[i] = x[i] + c * y[i];
-}
-
-static void standardFloat_cadd(float *z, const float *x, const float *y, const float c, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        z[i] = x[i] + c * y[i];
-}
-
-static void standardDouble_adds(double *y, const double *x, const double c, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        y[i] = c + x[i];
-}
-
-static void standardFloat_adds(float *y, const float *x, const float c, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        y[i] = c + x[i];
-}
-
-static void standardDouble_cmul(double *z, const double *x, const double *y, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        z[i] = x[i] * y[i];
-}
-
-static void standardFloat_cmul(float *z, const float *x, const float *y, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        z[i] = x[i] * y[i];
-}
-
-static void standardDouble_muls(double *y, const double *x, const double c, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        y[i] = c * x[i];
-}
-
-static void standardFloat_muls(float *y, const float *x, const float c, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        y[i] = c * x[i];
-}
-
-static void standardDouble_cdiv(double *z, const double *x, const double *y, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        z[i] = x[i] / y[i];
-}
-
-static void standardFloat_cdiv(float *z, const float *x, const float *y, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        z[i] = x[i] / y[i];
-}
-
-static void standardDouble_divs(double *y, const double *x, const double c, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        y[i] = x[i] / c;
-}
-
-static void standardFloat_divs(float *y, const float *x, const float c, const ptrdiff_t n)
-{
-    for (ptrdiff_t i = 0; i < n; i++)
-        y[i] = x[i] / c;
-}
-
-double randDouble()
-{
-    return (double)(rand()%100)/(double)(rand()%100) * (rand()%2 ? -1.0 : 1.0);
-}
-
-int near(double a, double b)
-{
-    int aClass = fpclassify(a);
-    int bClass = fpclassify(b);
-
-    if(aClass != bClass)          // i.e. is it NAN, infinite, or finite...?
-        return 0;
-
-    if(aClass == FP_INFINITE)     // if it is infinite, the sign must be the same, i.e. positive infinity is not near negative infinity
-        return (signbit(a) == signbit(b));
-    else if(aClass == FP_NORMAL)  // if it is a normal number then check the magnitude of the difference between the numbers
-        return fabs(a - b) < 0.001;
-    else                          // if both numbers are of the same class and that class is anything else (e.g. NAN), then they are near to each other
- return 1; -} - - -//-------------------------------------------------------------------------------------------------- -// Standard tests: -//-------------------------------------------------------------------------------------------------- -void test_THDoubleVector_fill_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - double *x_standard = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *x_optimized = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - - double yVal0 = 17.2; - double yVal1 = 8.2; - double yVal2 = 5.1; - double yVal3 = -0.9; - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardDouble_fill(x_standard, yVal0, VSX_PERF_NUM_TEST_ELEMENTS ); - standardDouble_fill(x_standard, yVal1, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardDouble_fill(x_standard, yVal2, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardDouble_fill(x_standard, yVal3, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardDouble_fill() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THDoubleVector_fill_VSX(x_optimized, yVal0, VSX_PERF_NUM_TEST_ELEMENTS ); - THDoubleVector_fill_VSX(x_optimized, yVal1, VSX_PERF_NUM_TEST_ELEMENTS-1); - THDoubleVector_fill_VSX(x_optimized, yVal2, VSX_PERF_NUM_TEST_ELEMENTS-2); - THDoubleVector_fill_VSX(x_optimized, yVal3, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THDoubleVector_fill_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - yVal0 += 1.0; - yVal1 += 1.0; - yVal2 += 1.0; - yVal3 -= 1.0; - - standardDouble_fill( x_standard, yVal0, VSX_FUNC_NUM_TEST_ELEMENTS); - THDoubleVector_fill_VSX(x_optimized, yVal0, VSX_FUNC_NUM_TEST_ELEMENTS); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - assert(x_optimized[i] == yVal0); - - standardDouble_fill( x_standard+1, yVal1, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THDoubleVector_fill_VSX(x_optimized+1, yVal1, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardDouble_fill( x_standard+2, yVal2, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THDoubleVector_fill_VSX(x_optimized+2, yVal2, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardDouble_fill( x_standard+3, yVal3, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THDoubleVector_fill_VSX(x_optimized+3, yVal3, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardDouble_fill( x_standard+517, yVal0, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THDoubleVector_fill_VSX(x_optimized+517, yVal0, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardDouble_fill( x_standard+517+r, yVal2, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THDoubleVector_fill_VSX(x_optimized+517+r, yVal2, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - assert(x_optimized[i] == x_standard[i]); - printf("All assertions PASSED for THDoubleVector_fill_VSX() test.\n\n"); - - - free(x_standard); - free(x_optimized); -} - - -void test_THFloatVector_fill_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - float *x_standard = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *x_optimized = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - - float yVal0 = 17.2; - 
float yVal1 = 8.2; - float yVal2 = 5.1; - float yVal3 = -0.9; - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardFloat_fill(x_standard, yVal0, VSX_PERF_NUM_TEST_ELEMENTS ); - standardFloat_fill(x_standard, yVal1, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardFloat_fill(x_standard, yVal2, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardFloat_fill(x_standard, yVal3, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardFloat_fill() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THFloatVector_fill_VSX(x_optimized, yVal0, VSX_PERF_NUM_TEST_ELEMENTS ); - THFloatVector_fill_VSX(x_optimized, yVal1, VSX_PERF_NUM_TEST_ELEMENTS-1); - THFloatVector_fill_VSX(x_optimized, yVal2, VSX_PERF_NUM_TEST_ELEMENTS-2); - THFloatVector_fill_VSX(x_optimized, yVal3, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THFloatVector_fill_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - yVal0 += 1.0; - yVal1 += 1.0; - yVal2 += 1.0; - yVal3 -= 1.0; - - standardFloat_fill( x_standard, yVal0, VSX_FUNC_NUM_TEST_ELEMENTS); - THFloatVector_fill_VSX(x_optimized, yVal0, VSX_FUNC_NUM_TEST_ELEMENTS); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - assert(x_optimized[i] == yVal0); - - standardFloat_fill( x_standard+1, yVal1, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THFloatVector_fill_VSX(x_optimized+1, yVal1, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardFloat_fill( x_standard+2, yVal2, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THFloatVector_fill_VSX(x_optimized+2, yVal2, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardFloat_fill( x_standard+3, yVal3, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THFloatVector_fill_VSX(x_optimized+3, yVal3, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardFloat_fill( x_standard+517, yVal0, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THFloatVector_fill_VSX(x_optimized+517, yVal0, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardFloat_fill( x_standard+517+r, yVal2, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THFloatVector_fill_VSX(x_optimized+517+r, yVal2, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - assert(x_optimized[i] == x_standard[i]); - printf("All assertions PASSED for THFloatVector_fill_VSX() test.\n\n"); - - - free(x_standard); - free(x_optimized); -} - - -void test_THDoubleVector_cadd_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - double *z_standard = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *z_optimized = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *x = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *y = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double c = randDouble(); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - { - x[i] = randDouble(); - y[i] = randDouble(); - } - - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardDouble_cadd(z_standard, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS ); - standardDouble_cadd(z_standard, x, y, c, 
VSX_PERF_NUM_TEST_ELEMENTS-1); - standardDouble_cadd(z_standard, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardDouble_cadd(z_standard, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardDouble_cadd() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THDoubleVector_cadd_VSX(z_optimized, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS ); - THDoubleVector_cadd_VSX(z_optimized, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - THDoubleVector_cadd_VSX(z_optimized, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - THDoubleVector_cadd_VSX(z_optimized, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THDoubleVector_cadd_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardDouble_cadd( z_standard+1, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THDoubleVector_cadd_VSX(z_optimized+1, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardDouble_cadd( z_standard+2, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THDoubleVector_cadd_VSX(z_optimized+2, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardDouble_cadd( z_standard+3, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THDoubleVector_cadd_VSX(z_optimized+3, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardDouble_cadd( z_standard+517, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THDoubleVector_cadd_VSX(z_optimized+517, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardDouble_cadd( z_standard+517+r, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THDoubleVector_cadd_VSX(z_optimized+517+r, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(z_optimized[i], z_standard[i])) - printf("%d %f %f\n", i, z_optimized[i], z_standard[i]); - assert(near(z_optimized[i], z_standard[i])); - } - printf("All assertions PASSED for THDoubleVector_cadd_VSX() test.\n\n"); - - - free(z_standard); - free(z_optimized); - free(x); -} - -void test_THFloatVector_cadd_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - float *z_standard = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *z_optimized = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *x = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *y = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float c = (float)randDouble(); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - { - x[i] = (float)randDouble(); - y[i] = (float)randDouble(); - } - - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardFloat_cadd(z_standard, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS ); - standardFloat_cadd(z_standard, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardFloat_cadd(z_standard, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardFloat_cadd(z_standard, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardFloat_cadd() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THFloatVector_cadd_VSX(z_optimized, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS ); - 
THFloatVector_cadd_VSX(z_optimized, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - THFloatVector_cadd_VSX(z_optimized, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - THFloatVector_cadd_VSX(z_optimized, x, y, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THFloatVector_cadd_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardFloat_cadd( z_standard+1, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THFloatVector_cadd_VSX(z_optimized+1, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardFloat_cadd( z_standard+2, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THFloatVector_cadd_VSX(z_optimized+2, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardFloat_cadd( z_standard+3, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THFloatVector_cadd_VSX(z_optimized+3, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardFloat_cadd( z_standard+517, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THFloatVector_cadd_VSX(z_optimized+517, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardFloat_cadd( z_standard+517+r, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THFloatVector_cadd_VSX(z_optimized+517+r, x, y, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(z_optimized[i], z_standard[i])) - printf("%d %f %f\n", i, z_optimized[i], z_standard[i]); - assert(near(z_optimized[i], z_standard[i])); - } - printf("All assertions PASSED for THFloatVector_cadd_VSX() test.\n\n"); - - - free(z_standard); - free(z_optimized); - free(x); -} - -void test_THDoubleVector_adds_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - double *y_standard = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *y_optimized = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *x = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double c = randDouble(); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - x[i] = randDouble(); - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardDouble_adds(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - standardDouble_adds(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardDouble_adds(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardDouble_adds(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardDouble_adds() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THDoubleVector_adds_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - THDoubleVector_adds_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - THDoubleVector_adds_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - THDoubleVector_adds_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THDoubleVector_adds_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardDouble_adds( y_standard+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - 
THDoubleVector_adds_VSX(y_optimized+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardDouble_adds( y_standard+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THDoubleVector_adds_VSX(y_optimized+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardDouble_adds( y_standard+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THDoubleVector_adds_VSX(y_optimized+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardDouble_adds( y_standard+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THDoubleVector_adds_VSX(y_optimized+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardDouble_adds( y_standard+517+r, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THDoubleVector_adds_VSX(y_optimized+517+r, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(y_optimized[i], y_standard[i])) - printf("%d %f %f\n", i, y_optimized[i], y_standard[i]); - assert(near(y_optimized[i], y_standard[i])); - } - printf("All assertions PASSED for THDoubleVector_adds_VSX() test.\n\n"); - - - free(y_standard); - free(y_optimized); - free(x); -} - - -void test_THFloatVector_adds_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - float *y_standard = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *y_optimized = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *x = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float c = (float)randDouble(); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - x[i] = (float)randDouble(); - - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardFloat_adds(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - standardFloat_adds(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardFloat_adds(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardFloat_adds(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardFloat_adds() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THFloatVector_adds_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - THFloatVector_adds_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - THFloatVector_adds_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - THFloatVector_adds_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THFloatVector_adds_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardFloat_adds( y_standard+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THFloatVector_adds_VSX(y_optimized+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardFloat_adds( y_standard+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THFloatVector_adds_VSX(y_optimized+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardFloat_adds( y_standard+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THFloatVector_adds_VSX(y_optimized+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardFloat_adds( y_standard+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THFloatVector_adds_VSX(y_optimized+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardFloat_adds( y_standard+517+r, x, c, 
VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THFloatVector_adds_VSX(y_optimized+517+r, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(y_optimized[i], y_standard[i])) - printf("%d %f %f\n", i, y_optimized[i], y_standard[i]); - assert(near(y_optimized[i], y_standard[i])); - } - printf("All assertions PASSED for THFloatVector_adds_VSX() test.\n\n"); - - - free(y_standard); - free(y_optimized); - free(x); -} - - -void test_THDoubleVector_cmul_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - double *z_standard = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *z_optimized = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *x = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *y = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - { - x[i] = randDouble(); - y[i] = randDouble(); - } - - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardDouble_cmul(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS ); - standardDouble_cmul(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardDouble_cmul(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardDouble_cmul(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardDouble_cmul() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THDoubleVector_cmul_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS ); - THDoubleVector_cmul_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-1); - THDoubleVector_cmul_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-2); - THDoubleVector_cmul_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THDoubleVector_cmul_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardDouble_cmul( z_standard+1, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THDoubleVector_cmul_VSX(z_optimized+1, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardDouble_cmul( z_standard+2, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THDoubleVector_cmul_VSX(z_optimized+2, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardDouble_cmul( z_standard+3, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THDoubleVector_cmul_VSX(z_optimized+3, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardDouble_cmul( z_standard+517, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THDoubleVector_cmul_VSX(z_optimized+517, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardDouble_cmul( z_standard+517+r, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THDoubleVector_cmul_VSX(z_optimized+517+r, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(z_optimized[i], z_standard[i])) - printf("%d %f %f\n", i, z_optimized[i], z_standard[i]); - assert(near(z_optimized[i], z_standard[i])); - } - printf("All assertions PASSED for THDoubleVector_cmul_VSX() test.\n\n"); - - - free(z_standard); - free(z_optimized); - free(x); -} - -void test_THFloatVector_cmul_VSX() -{ - clock_t start, end; - double 
elapsedSeconds_optimized, elapsedSeconds_standard; - - float *z_standard = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *z_optimized = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *x = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *y = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - { - x[i] = (float)randDouble(); - y[i] = (float)randDouble(); - } - - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardFloat_cmul(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS ); - standardFloat_cmul(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardFloat_cmul(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardFloat_cmul(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardFloat_cmul() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THFloatVector_cmul_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS ); - THFloatVector_cmul_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-1); - THFloatVector_cmul_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-2); - THFloatVector_cmul_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THFloatVector_cmul_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardFloat_cmul( z_standard+1, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THFloatVector_cmul_VSX(z_optimized+1, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardFloat_cmul( z_standard+2, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THFloatVector_cmul_VSX(z_optimized+2, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardFloat_cmul( z_standard+3, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THFloatVector_cmul_VSX(z_optimized+3, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardFloat_cmul( z_standard+517, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THFloatVector_cmul_VSX(z_optimized+517, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardFloat_cmul( z_standard+517+r, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THFloatVector_cmul_VSX(z_optimized+517+r, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(z_optimized[i], z_standard[i])) - printf("%d %f %f\n", i, z_optimized[i], z_standard[i]); - assert(near(z_optimized[i], z_standard[i])); - } - printf("All assertions PASSED for THFloatVector_cmul_VSX() test.\n\n"); - - - free(z_standard); - free(z_optimized); - free(x); -} - -void test_THDoubleVector_muls_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - double *y_standard = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *y_optimized = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *x = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double c = randDouble(); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - { - x[i] = randDouble(); - } - - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = 
clock(); - standardDouble_muls(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - standardDouble_muls(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardDouble_muls(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardDouble_muls(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardDouble_muls() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THDoubleVector_muls_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - THDoubleVector_muls_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - THDoubleVector_muls_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - THDoubleVector_muls_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THDoubleVector_muls_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardDouble_muls( y_standard+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THDoubleVector_muls_VSX(y_optimized+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardDouble_muls( y_standard+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THDoubleVector_muls_VSX(y_optimized+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardDouble_muls( y_standard+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THDoubleVector_muls_VSX(y_optimized+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardDouble_muls( y_standard+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THDoubleVector_muls_VSX(y_optimized+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardDouble_muls( y_standard+517+r, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THDoubleVector_muls_VSX(y_optimized+517+r, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(y_optimized[i], y_standard[i])) - printf("%d %f %f\n", i, y_optimized[i], y_standard[i]); - assert(near(y_optimized[i], y_standard[i])); - } - printf("All assertions PASSED for THDoubleVector_muls_VSX() test.\n\n"); - - - free(y_standard); - free(y_optimized); - free(x); -} - -void test_THFloatVector_muls_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - float *y_standard = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *y_optimized = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *x = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float c = (float)randDouble(); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - { - x[i] = (float)randDouble(); - } - - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardFloat_muls(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - standardFloat_muls(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardFloat_muls(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardFloat_muls(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardFloat_muls() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THFloatVector_muls_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - THFloatVector_muls_VSX(y_optimized, x, c, 
VSX_PERF_NUM_TEST_ELEMENTS-1); - THFloatVector_muls_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - THFloatVector_muls_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THFloatVector_muls_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardFloat_muls( y_standard+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THFloatVector_muls_VSX(y_optimized+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardFloat_muls( y_standard+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THFloatVector_muls_VSX(y_optimized+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardFloat_muls( y_standard+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THFloatVector_muls_VSX(y_optimized+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardFloat_muls( y_standard+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THFloatVector_muls_VSX(y_optimized+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardFloat_muls( y_standard+517+r, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THFloatVector_muls_VSX(y_optimized+517+r, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(y_optimized[i], y_standard[i])) - printf("%d %f %f\n", i, y_optimized[i], y_standard[i]); - assert(near(y_optimized[i], y_standard[i])); - } - printf("All assertions PASSED for THFloatVector_muls_VSX() test.\n\n"); - - - free(y_standard); - free(y_optimized); - free(x); -} - - - -void test_THDoubleVector_cdiv_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - double *z_standard = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *z_optimized = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *x = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *y = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - { - x[i] = randDouble(); - y[i] = randDouble(); - } - - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardDouble_cdiv(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS ); - standardDouble_cdiv(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardDouble_cdiv(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardDouble_cdiv(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardDouble_cdiv() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THDoubleVector_cdiv_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS ); - THDoubleVector_cdiv_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-1); - THDoubleVector_cdiv_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-2); - THDoubleVector_cdiv_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THDoubleVector_cdiv_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardDouble_cdiv( z_standard+1, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-2); 
- THDoubleVector_cdiv_VSX(z_optimized+1, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardDouble_cdiv( z_standard+2, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THDoubleVector_cdiv_VSX(z_optimized+2, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardDouble_cdiv( z_standard+3, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THDoubleVector_cdiv_VSX(z_optimized+3, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardDouble_cdiv( z_standard+517, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THDoubleVector_cdiv_VSX(z_optimized+517, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardDouble_cdiv( z_standard+517+r, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THDoubleVector_cdiv_VSX(z_optimized+517+r, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(z_optimized[i], z_standard[i])) - printf("%d %f %f\n", i, z_optimized[i], z_standard[i]); - assert(near(z_optimized[i], z_standard[i])); - } - printf("All assertions PASSED for THDoubleVector_cdiv_VSX() test.\n\n"); - - - free(z_standard); - free(z_optimized); - free(x); -} - -void test_THFloatVector_cdiv_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - float *z_standard = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *z_optimized = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *x = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *y = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - { - x[i] = (float)randDouble(); - y[i] = (float)randDouble(); - } - - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardFloat_cdiv(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS ); - standardFloat_cdiv(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardFloat_cdiv(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardFloat_cdiv(z_standard, x, y, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardFloat_cdiv() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THFloatVector_cdiv_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS ); - THFloatVector_cdiv_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-1); - THFloatVector_cdiv_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-2); - THFloatVector_cdiv_VSX(z_optimized, x, y, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THFloatVector_cdiv_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardFloat_cdiv( z_standard+1, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THFloatVector_cdiv_VSX(z_optimized+1, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardFloat_cdiv( z_standard+2, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THFloatVector_cdiv_VSX(z_optimized+2, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardFloat_cdiv( z_standard+3, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THFloatVector_cdiv_VSX(z_optimized+3, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardFloat_cdiv( z_standard+517, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THFloatVector_cdiv_VSX(z_optimized+517, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - 
standardFloat_cdiv( z_standard+517+r, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THFloatVector_cdiv_VSX(z_optimized+517+r, x, y, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(z_optimized[i], z_standard[i])) - printf("%d %f %f\n", i, z_optimized[i], z_standard[i]); - assert(near(z_optimized[i], z_standard[i])); - } - printf("All assertions PASSED for THFloatVector_cdiv_VSX() test.\n\n"); - - - free(z_standard); - free(z_optimized); - free(x); -} - -void test_THDoubleVector_divs_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, elapsedSeconds_standard; - - double *y_standard = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *y_optimized = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double *x = (double *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(double)); - double c = randDouble(); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - { - x[i] = randDouble(); - } - - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardDouble_divs(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - standardDouble_divs(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardDouble_divs(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardDouble_divs(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardDouble_divs() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THDoubleVector_divs_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - THDoubleVector_divs_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - THDoubleVector_divs_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - THDoubleVector_divs_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THDoubleVector_divs_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardDouble_divs( y_standard+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THDoubleVector_divs_VSX(y_optimized+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardDouble_divs( y_standard+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THDoubleVector_divs_VSX(y_optimized+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardDouble_divs( y_standard+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THDoubleVector_divs_VSX(y_optimized+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardDouble_divs( y_standard+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THDoubleVector_divs_VSX(y_optimized+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardDouble_divs( y_standard+517+r, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THDoubleVector_divs_VSX(y_optimized+517+r, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(y_optimized[i], y_standard[i])) - printf("%d %f %f\n", i, y_optimized[i], y_standard[i]); - assert(near(y_optimized[i], y_standard[i])); - } - printf("All assertions PASSED for THDoubleVector_divs_VSX() test.\n\n"); - - - free(y_standard); - free(y_optimized); - free(x); -} - -void test_THFloatVector_divs_VSX() -{ - clock_t start, end; - double elapsedSeconds_optimized, 
elapsedSeconds_standard; - - float *y_standard = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *y_optimized = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float *x = (float *)malloc(VSX_PERF_NUM_TEST_ELEMENTS*sizeof(float)); - float c = (float)randDouble(); - - // Initialize randomly - for(int i = 0; i < VSX_PERF_NUM_TEST_ELEMENTS; i++) - { - x[i] = (float)randDouble(); - } - - - //------------------------------------------------- - // Performance Test - //------------------------------------------------- - start = clock(); - standardFloat_divs(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - standardFloat_divs(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - standardFloat_divs(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - standardFloat_divs(y_standard, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_standard = (double)(end - start) / CLOCKS_PER_SEC; - printf("standardFloat_divs() test took %.5lf seconds\n", elapsedSeconds_standard); - - start = clock(); - THFloatVector_divs_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS ); - THFloatVector_divs_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-1); - THFloatVector_divs_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-2); - THFloatVector_divs_VSX(y_optimized, x, c, VSX_PERF_NUM_TEST_ELEMENTS-3); - end = clock(); - - elapsedSeconds_optimized = (double)(end - start) / CLOCKS_PER_SEC; - printf("THFloatVector_divs_VSX() test took %.5lf seconds\n", elapsedSeconds_optimized); - - - //------------------------------------------------- - // Correctness Test - //------------------------------------------------- - standardFloat_divs( y_standard+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - THFloatVector_divs_VSX(y_optimized+1, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-2); - standardFloat_divs( y_standard+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - THFloatVector_divs_VSX(y_optimized+2, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-4); - standardFloat_divs( y_standard+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - THFloatVector_divs_VSX(y_optimized+3, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-6); - standardFloat_divs( y_standard+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - THFloatVector_divs_VSX(y_optimized+517, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-1029); - int r = rand() % 258; - standardFloat_divs( y_standard+517+r, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - THFloatVector_divs_VSX(y_optimized+517+r, x, c, VSX_FUNC_NUM_TEST_ELEMENTS-(1029+r+100)); - - for(int i = 0; i < VSX_FUNC_NUM_TEST_ELEMENTS; i++) - { - if(!near(y_optimized[i], y_standard[i])) - printf("%d %f %f\n", i, y_optimized[i], y_standard[i]); - assert(near(y_optimized[i], y_standard[i])); - } - printf("All assertions PASSED for THFloatVector_divs_VSX() test.\n\n"); - - - free(y_standard); - free(y_optimized); - free(x); -} - - -//-------------------------------------------------------------------------------------------------- -// Run tests: -//-------------------------------------------------------------------------------------------------- -int main() -{ - printf("\n"); - - - // First test utility functions - - assert(!near(0.1, -0.1)); - assert(!near(0.1f, -0.1f)); - assert(!near(9, 10)); - assert(near(0.1, 0.1000001)); - assert(near(0.1f, 0.1000001f)); - assert(near(100.764, 100.764)); - assert(!near(NAN, 0.0)); - assert(!near(-9.5, NAN)); - assert(!near(NAN, 100)); - assert(!near(-0.0, NAN)); - assert(near(NAN, NAN)); - assert(near(INFINITY, INFINITY)); - assert(near(-INFINITY, -INFINITY)); - assert(!near(INFINITY, NAN)); - assert(!near(0, 
INFINITY));
-    assert(!near(-999.4324, INFINITY));
-    assert(!near(INFINITY, 982374.1));
-    assert(!near(-INFINITY, INFINITY));
-
-
-
-    // Then test each vectorized function
-
-    test_THDoubleVector_fill_VSX();
-    test_THFloatVector_fill_VSX();
-
-    test_THDoubleVector_cadd_VSX();
-    test_THFloatVector_cadd_VSX();
-
-    test_THDoubleVector_adds_VSX();
-    test_THFloatVector_adds_VSX();
-
-    test_THDoubleVector_cmul_VSX();
-    test_THFloatVector_cmul_VSX();
-
-    test_THDoubleVector_muls_VSX();
-    test_THFloatVector_muls_VSX();
-
-    test_THDoubleVector_cdiv_VSX();
-    test_THFloatVector_cdiv_VSX();
-
-    test_THDoubleVector_divs_VSX();
-    test_THFloatVector_divs_VSX();
-
-
-
-    printf("Finished running all tests. All tests PASSED.\n");
-    return 0;
-}
-
-
-#endif // defined RUN_VSX_TESTS
-
-#endif // defined __PPC64__
-
diff --git a/contrib/lua-torch/torch7/lib/luaT/CMakeLists.txt b/contrib/lua-torch/torch7/lib/luaT/CMakeLists.txt
deleted file mode 100644
index 518c407f21..0000000000
--- a/contrib/lua-torch/torch7/lib/luaT/CMakeLists.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-# avoid some cmake warnings
-
-INCLUDE_DIRECTORIES(${LUA_INCDIR})
-IF(LUALIB)
-  LINK_DIRECTORIES(${LUA_LIBDIR}) # note: must be done before defining target
-ENDIF()
-
-ADD_LIBRARY(luaT STATIC luaT.h luaT.c)
-
-IF(LUALIB)
-  TARGET_LINK_LIBRARIES(luaT ${LUALIB}) # must be done after ;)
-ENDIF()
diff --git a/contrib/lua-torch/torch7/lib/luaT/README.md b/contrib/lua-torch/torch7/lib/luaT/README.md
deleted file mode 100644
index 235b8edc0e..0000000000
--- a/contrib/lua-torch/torch7/lib/luaT/README.md
+++ /dev/null
@@ -1,266 +0,0 @@
-
-# Lua Torch C API #
-
-luaT provides an API to interface Lua and C in Torch packages. It defines a
-concept of _classes_ to Lua for Torch, and provides a mechanism to easily
-handle these Lua classes from C.
-
-It additionally provides a few functions that `luaL` should have defined, and
-defines several functions similar to the `luaL` ones for better type error
-printing when using `luaT` classes.
-
-
-## Memory functions ##
-
-Classical memory allocation functions which generate a Lua error in case of
-a problem.
-
-
-### void* luaT_alloc(lua_State *L, long size) ###
-
-Allocates `size` bytes, and returns a pointer to the allocated
-memory. A Lua error will be generated if running out of memory.
-
-
-### void* luaT_realloc(lua_State *L, void *ptr, long size) ###
-
-Reallocates `ptr` to `size` bytes. `ptr` must have been previously
-allocated with [luaT_alloc](#luaT_alloc) or
-[luaT_realloc](#luaT_realloc), or the C `malloc` or `realloc`
-functions. A Lua error will be generated if running out of memory.
-
-
-### void luaT_free(lua_State *L, void *ptr) ###
-
-Frees the memory allocated at address `ptr`. The memory must have been
-previously allocated with [luaT_alloc](#luaT_alloc) or
-[luaT_realloc](#luaT_realloc), or the C `malloc` or `realloc`
-functions.
-
-
-## Class creation and basic handling ##
-
-A `luaT` class is basically either a Lua _table_ or _userdata_ with
-an appropriate _metatable_. This metatable is created with
-[luaT_newmetatable](#luaT_newmetatable). Contrary to the `luaL` userdata
-functions, the luaT mechanism handles inheritance. If the class inherits from
-another class, then the metatable will itself have a metatable
-corresponding to the _parent metatable_: the metatables are cascaded
-according to the class inheritance. Multiple inheritance is not supported.
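-
-As a minimal sketch (the class name `foo.Bar` and the `Bar_*` functions
-below are hypothetical, not part of luaT), registering such a class from C
-could look like this:
-
-    #include "luaT.h"
-
-    /* constructor: invoked when Lua code calls foo.Bar() */
-    static int Bar_new(lua_State *L)
-    {
-      double *p = luaT_alloc(L, sizeof(double));
-      *p = 0;
-      luaT_pushudata(L, p, "foo.Bar");
-      return 1;
-    }
-
-    /* destructor: invoked when the object is garbage collected */
-    static int Bar_free(lua_State *L)
-    {
-      double *p = luaT_toudata(L, 1, "foo.Bar");
-      luaT_free(L, p);
-      return 0;
-    }
-
-    /* in the module's entry point, with no parent class and no factory: */
-    luaT_newmetatable(L, "foo.Bar", NULL, Bar_new, Bar_free, NULL);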
-
-
-### Operator overloading ###
-
-The metatable of a `luaT` object contains Lua operators like
-`__index`, `__newindex`, `__tostring`, `__add`
-(etc...). These operators will respectively look for `__index__`,
-`__newindex__`, `__tostring__`, `__add__` (etc...) in the
-metatable. If found, the corresponding function or value will be returned,
-else a Lua error will be raised.
-
-If one wants to provide `__index__` or `__newindex__` in the
-metaclass, these operators must follow a particular scheme:
-
-  * `__index__` must either return a value _and_ `true`, or return `false` only. In the first case, it means `__index__` was able to handle the given argument (e.g., the type was correct). The second case means it was not able to do anything, so `__index` in the root metatable can then try to see if the metaclass contains the required value.
-
-  * `__newindex__` must either return `true` or `false`. As for `__index__`, `true` means it could handle the argument and `false` means it could not. In the latter case, the root metatable's `__newindex` will then raise an error if the object was a userdata, or apply a rawset if the object was a Lua table.
-
-Other metaclass operators like `__tostring__`, `__add__`, etc... do not have any particular constraint.
-
-
-### const char* luaT_newlocalmetatable(lua_State *L, const char *tname, const char *parenttname, lua_CFunction constructor, lua_CFunction destructor, lua_CFunction factory, int moduleidx) ###
-
-This function creates a new metatable, which is the Lua way to define a new
-object class. As for `luaL_newmetatable`, the metatable is registered in
-the Lua registry table, with the key `tname`. In addition, `tname` is
-also registered in the Lua registry, with the metatable as key (the
-typename of a given object can thus be easily retrieved).
-
-The class name `tname` must be of the form `modulename.classname`. If not
-NULL, `parenttname` must be a valid typename corresponding to the parent
-class of the new class.
-
-If `constructor` is not NULL, a function `new` will be added to the
-metatable, pointing to this given function.
-
-A "constructor table" will be created by `luaT_newlocalmetatable`: it will
-contain all the class methods, and be callable, calling the `constructor`, if
-a `constructor` has been passed. The constructor table is either stored into
-`modulename.classname` (that is, in the global namespace) if `moduleidx <=
-0` or in the table at index `moduleidx` in the stack (if `moduleidx > 0`).
-
-If not NULL, `destructor` will be called when garbage collecting the object.
-
-If not NULL, `factory` must be a Lua C function creating an empty object
-instance of the class. Such functions are used in Torch for serialization.
-
-Note that classes can be partly defined in C and partly defined in Lua:
-once the metatable is created in C, it can be filled up with additional
-methods in Lua.
-
-The return value is the value returned by [luaT_typenameid](#luat_typenameid).
-
-
-### const char* luaT_newmetatable(lua_State *L, const char *tname, const char *parenttname, lua_CFunction constructor, lua_CFunction destructor, lua_CFunction factory) ###
-
-Same as [luaT_newlocalmetatable](#luat_newlocalmetatable), but where the
-constructor table is assigned in the global namespace (`moduleidx = 0`).
-
-
-### int luaT_pushmetatable(lua_State *L, const char *tname) ###
-
-Pushes the metatable with type name `tname` on the stack, if `tname` is a
-valid Torch class name (previously registered with luaT_newmetatable).
-
-On success, returns 1.
-If `tname` is invalid, nothing is pushed and 0 is returned.
-
-
-### const char* luaT_typenameid(lua_State *L, const char *tname) ###
-
-If `tname` is a valid Torch class name, then returns a unique string (the
-contents will be the same as `tname`) pointing to the string registered
-in the Lua registry. This string is thus valid as long as Lua is
-running. The returned string shall not be freed.
-
-If `tname` is an invalid class name, returns NULL.
-
-
-### const char* luaT_typename(lua_State *L, int ud) ###
-
-Returns the typename of the object at index `ud` on the stack. If it is
-not a valid Torch object, returns NULL.
-
-
-### void luaT_pushudata(lua_State *L, void *udata, const char *tname) ###
-
-Given a C structure `udata`, pushes a userdata object on the stack with
-the metatable corresponding to `tname`. Obviously, `tname` must be a valid
-Torch name registered with [luaT_newmetatable](#luat_newmetatable).
-
-
-### void *luaT_toudata(lua_State *L, int ud, const char *tname) ###
-
-Returns a pointer to the original C structure previously pushed on the
-stack with [luaT_pushudata](#luat_pushudata), if the object at index
-`ud` is an instance of the Torch class `tname`. Returns NULL otherwise.
-
-
-### int luaT_isudata(lua_State *L, int ud, const char *tname) ###
-
-Returns 1 if the object at index `ud` on the stack is an instance of the
-Torch class `tname`. Returns 0 otherwise.
-
-
-### Checking fields of a table ###
-
-These functions check that the table at the given index `ud` on the Lua
-stack has a field named `field`, and that this field is of the specified
-type. They raise a Lua error on failure.
-
-
-### void *luaT_getfieldcheckudata(lua_State *L, int ud, const char *field, const char *tname) ###
-
-Checks that the field named `field` of the table at index `ud` is an
-instance of the Torch class `tname`. On success, returns the pointer to
-the C structure previously pushed on the stack with
-[luaT_pushudata](#luat_pushudata). The function raises a Lua error on
-failure.
-
-
-### void *luaT_getfieldchecklightudata(lua_State *L, int ud, const char *field) ###
-
-Checks that the field named `field` of the table at index `ud` is a
-lightuserdata. Returns the lightuserdata pointer on success. The function
-raises a Lua error on failure.
-
-
-### int luaT_getfieldcheckint(lua_State *L, int ud, const char *field) ###
-
-Checks that the field named `field` of the table at index `ud` is an
-int. Returns the int value on success. The function raises a Lua
-error on failure.
-
-
-### const char* luaT_getfieldcheckstring(lua_State *L, int ud, const char *field) ###
-
-Checks that the field named `field` of the table at index `ud` is a
-string. Returns a pointer to the string on success. The function raises a
-Lua error on failure.
-
-
-### int luaT_getfieldcheckboolean(lua_State *L, int ud, const char *field) ###
-
-Checks that the field named `field` of the table at index `ud` is a
-boolean. On success, returns 1 if the boolean is `true`, 0 if it is
-`false`. The function raises a Lua error on failure.
-
-
-### void luaT_getfieldchecktable(lua_State *L, int ud, const char *field) ###
-
-Checks that the field named `field` of the table at index `ud` is a
-table. On success, pushes the table on the stack. The function raises a Lua
-error on failure.
-
-
-### int luaT_typerror(lua_State *L, int ud, const char *tname) ###
-
-Raises a `luaL_argerror` (and returns its value), claiming that the
-object at index `ud` on the stack is not of type `tname`. Note that
-this function does not check the type; it only raises an error.
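-
-As an illustration (the `foo.Bar` class, the field names and the function
-itself are made-up examples, not part of the API), a C method could combine
-these checkers to validate its arguments:
-
-    /* expects a foo.Bar userdata at index 1 and an options table at index 2 */
-    static int Bar_configure(lua_State *L)
-    {
-      double *state = luaT_toudata(L, 1, "foo.Bar");
-      if(!state)
-        return luaT_typerror(L, 1, "foo.Bar");
-
-      /* each field check raises a Lua error if the field is missing or mistyped */
-      int iterations = luaT_getfieldcheckint(L, 2, "iterations");
-      int verbose    = luaT_getfieldcheckboolean(L, 2, "verbose");
-
-      *state = (double)(verbose ? 2*iterations : iterations);
-      return 0;
-    }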
-
-
-### int luaT_checkboolean(lua_State *L, int ud) ###
-
-Checks that the value at index `ud` is a boolean. On success, returns 1
-if the boolean is `true`, 0 if it is `false`. The function raises a Lua
-error on failure.
-
-
-### int luaT_optboolean(lua_State *L, int ud, int def) ###
-
-Checks that the value at index `ud` is a boolean. On success, returns 1
-if the boolean is `true`, 0 if it is `false`. If there is no value at
-index `ud`, returns `def`. In any other case, it raises an error.
-
-
-### void luaT_registeratname(lua_State *L, const struct luaL_Reg *methods, const char *name) ###
-
-This function assumes a table is on the stack. It creates a table field
-`name` in the table (if this field does not exist yet), and fills up
-`methods` in this table field.
-
-
-### const char *luaT_classrootname(const char *tname) ###
-
-Assuming `tname` is of the form `A.b.c`, returns `c`. The returned value
-shall not be freed. It is a pointer inside the `tname` string.
-
-
-### int luaT_classmodulename(const char *tname, char *parent_name) ###
-
-Alias for `luaT_fullparentname`, kept for backwards compatibility;
-use of `luaT_fullparentname` is preferred.
-
-
-### int luaT_fullparentname(const char *tname, char *parent_name) ###
-
-Returns a 0-1 valued integer indicating whether `tname` has a parent module.
-Assuming `tname` is of the form `A.b.c`, sets `parent_name` to `A.b`.
-
-
-### int luaT_outerparentname(const char *tname, char *parent_name) ###
-
-Returns a 0-1 valued integer indicating whether `tname` has a parent module.
-Assuming `tname` is of the form `A.b.c`, sets `parent_name` to `A`.
-
-
-### int luaT_innerparentname(const char *tname, char *parent_name) ###
-
-Returns a 0-1 valued integer indicating whether `tname` has a parent module.
-Assuming `tname` is of the form `A.b.c`, sets `parent_name` to `b`.
-
-
-### void luaT_stackdump(lua_State *L) ###
-
-This function prints out the state of the Lua stack. It is useful for
-debugging purposes.
-
diff --git a/contrib/lua-torch/torch7/lib/luaT/luaT.c b/contrib/lua-torch/torch7/lib/luaT/luaT.c
deleted file mode 100644
index d87f5d54c2..0000000000
--- a/contrib/lua-torch/torch7/lib/luaT/luaT.c
+++ /dev/null
@@ -1,1373 +0,0 @@
-#include
-#include
-#include
-
-#include "luaT.h"
-
-void* luaT_alloc(lua_State *L, ptrdiff_t size)
-{
-  void *ptr;
-
-  if(size == 0)
-    return NULL;
-
-  if(size < 0)
-    luaL_error(L, "$ Torch: invalid memory size -- maybe an overflow?");
-
-  ptr = malloc(size);
-  if(!ptr)
-    luaL_error(L, "$ Torch: not enough memory: you tried to allocate %dGB. Buy new RAM!", size/1073741824);
-
-  return ptr;
-}
-
-void* luaT_realloc(lua_State *L, void *ptr, ptrdiff_t size)
-{
-  if(!ptr)
-    return(luaT_alloc(L, size));
-
-  if(size == 0)
-  {
-    luaT_free(L, ptr);
-    return NULL;
-  }
-
-  if(size < 0)
-    luaL_error(L, "$ Torch: invalid memory size -- maybe an overflow?");
-
-  ptr = realloc(ptr, size);
-  if(!ptr)
-    luaL_error(L, "$ Torch: not enough memory: you tried to reallocate %dGB.
Buy new RAM!", size/1073741824); - return ptr; -} - -void luaT_free(lua_State *L, void *ptr) -{ - free(ptr); -} - -void luaT_setfuncs(lua_State *L, const luaL_Reg *l, int nup) -{ -#if LUA_VERSION_NUM == 501 - luaL_checkstack(L, nup+1, "too many upvalues"); - for (; l->name != NULL; l++) { /* fill the table with given functions */ - int i; - lua_pushstring(L, l->name); - for (i = 0; i < nup; i++) /* copy upvalues to the top */ - lua_pushvalue(L, -(nup+1)); - lua_pushcclosure(L, l->func, nup); /* closure with those upvalues */ - lua_settable(L, -(nup + 3)); - } - lua_pop(L, nup); /* remove upvalues */ -#else - luaL_setfuncs(L, l, nup); -#endif -} - -void luaT_stackdump(lua_State *L) -{ - int i; - const char *tname = NULL; - int top = lua_gettop(L); - for(i = 1; i <= top; i++) - { - int t = lua_type(L, i); - printf("%3d. ", i); - switch(t) - { - case LUA_TSTRING: - printf("'%s'", lua_tostring(L,i)); - break; - case LUA_TBOOLEAN: - printf(lua_toboolean(L, i) ? "true" : "false"); - break; - case LUA_TNUMBER: - printf("%g", lua_tonumber(L,i)); - break; - case LUA_TUSERDATA: - tname = luaT_typename(L, i); - printf("userdata %p [%s]", lua_topointer(L, i), (tname ? tname : "not a Torch object")); - break; - case 10: - tname = luaT_typename(L, i); - printf("cdata %p [%s]", lua_topointer(L, i), (tname ? tname : "not a Torch object")); - break; - case LUA_TTABLE: - lua_pushvalue(L, i); - lua_rawget(L, LUA_REGISTRYINDEX); - if(lua_isstring(L, -1)) - tname = lua_tostring(L, -1); /*luaT_typenameid(L, lua_tostring(L, -1)); */ - else - tname = NULL; - lua_pop(L, 1); - if(tname) - printf("metatable [%s]", tname); - else - { - tname = luaT_typename(L, i); - printf("table %p [%s]", lua_topointer(L, i), (tname ? tname : "not a Torch object")); - } - break; - default: - printf("Lua object type: %s", lua_typename(L,t)); - break; - } - printf("\n"); - } - printf("---------------------------------------------\n"); -} - -/* metatable operator methods */ -static int luaT_mt__index(lua_State *L); -static int luaT_mt__newindex(lua_State *L); -static int luaT_mt__tostring(lua_State *L); -static int luaT_mt__add(lua_State *L); -static int luaT_mt__sub(lua_State *L); -static int luaT_mt__mul(lua_State *L); -static int luaT_mt__div(lua_State *L); -static int luaT_mt__mod(lua_State *L); -static int luaT_mt__pow(lua_State *L); -static int luaT_mt__unm(lua_State *L); -static int luaT_mt__concat(lua_State *L); -static int luaT_mt__len(lua_State *L); -static int luaT_mt__eq(lua_State *L); -static int luaT_mt__lt(lua_State *L); -static int luaT_mt__le(lua_State *L); -static int luaT_mt__call(lua_State *L); - -/* Constructor-metatable methods */ -static int luaT_cmt__call(lua_State *L); -static int luaT_cmt__newindex(lua_State *L); - -const char* luaT_newmetatable(lua_State *L, const char *tname, const char *parent_tname, - lua_CFunction constructor, lua_CFunction destructor, lua_CFunction factory) -{ - return luaT_newlocalmetatable(L, tname, parent_tname, - constructor, destructor, factory, 0); -} - -const char* luaT_newlocalmetatable(lua_State *L, const char *tname, const char *parent_tname, - lua_CFunction constructor, lua_CFunction destructor, lua_CFunction factory, int moduleidx) -{ - lua_pushcfunction(L, luaT_lua_newmetatable); - lua_pushstring(L, tname); - (parent_tname ? (void)lua_pushstring(L, parent_tname) : lua_pushnil(L)); - (constructor ? lua_pushcfunction(L, constructor) : lua_pushnil(L)); - (destructor ? lua_pushcfunction(L, destructor) : lua_pushnil(L)); - (factory ? 
lua_pushcfunction(L, factory) : lua_pushnil(L)); - (moduleidx > 0 ? lua_pushvalue(L, moduleidx) : lua_pushnil(L)); - lua_call(L, 6, 1); - return luaT_typenameid(L, tname); -} - -int luaT_pushmetatable(lua_State *L, const char *tname) -{ - lua_getfield(L, LUA_REGISTRYINDEX, tname); - if(lua_isnil(L, -1)) - { - lua_pop(L, 1); - return 0; - } - return 1; -} - -const char *luaT_typenameid(lua_State *L, const char *tname) -{ - if(luaT_pushmetatable(L, tname)) - { - const char *tnameid = NULL; - lua_rawget(L, LUA_REGISTRYINDEX); - if(lua_isstring(L, -1)) - tnameid = lua_tostring(L, -1); - lua_pop(L, 1); /* the string/nil */ - return tnameid; - } - return NULL; -} - -static const char cdataname[] = "" - "local ok, ffi = pcall(require, 'ffi')\n" - "if ok then\n" - " local id2name = {}\n" - " return function(cdata, name)\n" - " local id\n" - " if jit then\n" - " id = tonumber(ffi.typeof(cdata))\n" - " else\n" - " id = tostring(ffi.typeof(cdata))\n" - " end\n" - " if id then\n" - " if name then\n" - " id2name[id] = name\n" - " return name\n" - " else\n" - " return rawget(id2name, id)\n" - " end\n" - " end\n" - " return nil\n" - " end\n" - "else\n" - " return function() end\n" - "end\n"; - -static const char* luaT_cdataname(lua_State *L, int ud, const char *tname) -{ - lua_pushstring(L, "__cdataname"); - lua_rawget(L, LUA_REGISTRYINDEX); - if(lua_isnil(L,-1)) - { - lua_pop(L, 1); - - if(luaL_dostring(L, cdataname)) /* did something go wrong? */ - luaL_error(L, "internal error (could not load cdataname): %s", lua_tostring(L, -1)); - - lua_pushstring(L, "__cdataname"); - lua_pushvalue(L, -2); - lua_rawset(L, LUA_REGISTRYINDEX); - } - if(!lua_isfunction(L, -1)) /* should not happen */ - luaL_error(L, "internal error (cdataname is not a function)"); - - lua_pushvalue(L, ud); - if(tname) - lua_pushstring(L, tname); - if(lua_pcall(L, (tname ? 
2 : 1), 1, 0)) - luaL_error(L, "internal error (cdataname): %s", lua_tostring(L, -1)); - - tname = lua_tostring(L, -1); - lua_pop(L, 1); - - return tname; -} - -static void* CDATA_MT_KEY = &CDATA_MT_KEY; -static const char cdatamt[] = "" - "local ok, ffi = pcall(require, 'ffi')\n" - "if ok and not jit then\n" - " return ffi.debug().cdata_mt\n" - "else\n" - " return {}\n" - "end\n"; - -static int luaT_iscdata(lua_State *L, int ud) -{ - int type = lua_type(L, ud); - if(type == 10) - return 1; - if(type != LUA_TUSERDATA) - return 0; - if(!lua_getmetatable(L, ud)) - return 0; - - lua_pushlightuserdata(L, CDATA_MT_KEY); - lua_rawget(L, LUA_REGISTRYINDEX); - if (lua_isnil(L, -1)) - { - // initialize cdata metatable - lua_pop(L, 1); - if(luaL_dostring(L, cdatamt)) - luaL_error(L, "internal error (could not load cdata mt): %s", lua_tostring(L, -1)); - - lua_pushlightuserdata(L, CDATA_MT_KEY); - lua_pushvalue(L, -2); - lua_rawset(L, LUA_REGISTRYINDEX); - } - - int iscdata = lua_rawequal(L, -1, -2); - lua_pop(L, 2); - return iscdata; -} - -const char* luaT_typename(lua_State *L, int ud) -{ - if(luaT_iscdata(L, ud)) - return luaT_cdataname(L, ud, NULL); - else if(lua_getmetatable(L, ud)) - { - const char *tname = NULL; - lua_rawget(L, LUA_REGISTRYINDEX); - if(lua_isstring(L, -1)) - tname = lua_tostring(L, -1); - lua_pop(L, 1); /* the string/nil */ - return tname; - } - return NULL; -} - -void luaT_pushudata(lua_State *L, void *udata, const char *tname) -{ - if(udata) - { - void **udata_p = lua_newuserdata(L, sizeof(void*)); - *udata_p = udata; - if(!luaT_pushmetatable(L, tname)) - luaL_error(L, "Torch internal problem: cannot find metatable for type <%s>", tname); - lua_setmetatable(L, -2); - } - else - lua_pushnil(L); -} - -void *luaT_toudata(lua_State *L, int ud, const char *tname) -{ - void **p = lua_touserdata(L, ud); - if(p != NULL) /* value is a userdata? */ - { - if(!luaT_pushmetatable(L, tname)) - luaL_error(L, "Torch internal problem: cannot find metatable for type <%s>", tname); - - /* initialize the table we want to get the metatable on */ - /* note that we have to be careful with indices, as we just inserted stuff */ - lua_pushvalue(L, (ud < 0 ? 
ud - 1 : ud)); - while(lua_getmetatable(L, -1)) /* get the next metatable */ - { - lua_remove(L, -2); /* remove the previous metatable [or object, if first time] */ - if(lua_rawequal(L, -1, -2)) - { - lua_pop(L, 2); /* remove the two metatables */ - return *p; - } - } - lua_pop(L, 2); /* remove the two metatables */ - } - return NULL; -} - -int luaT_isudata(lua_State *L, int ud, const char *tname) -{ - if(luaT_toudata(L, ud, tname)) - return 1; - else - return 0; -} - -void *luaT_checkudata(lua_State *L, int ud, const char *tname) -{ - void *p = luaT_toudata(L, ud, tname); - if(!p) - luaT_typerror(L, ud, tname); - return p; -} - -void luaT_pushlong(lua_State *L, long n) -{ -#if LUA_VERSION_NUM >= 503 - /* Only push the value as an integer if it fits in lua_Integer, - or if the lua_Number representation will be even worse */ - if (sizeof(lua_Integer) >= sizeof(long) || sizeof(lua_Number) <= sizeof(lua_Integer)) { - lua_pushinteger(L, n); - } else { - lua_pushnumber(L, (lua_Number)n); - } -#else - lua_pushnumber(L, (lua_Number)n); -#endif -} - -long luaT_checklong(lua_State *L, int idx) -{ -#if LUA_VERSION_NUM >= 503 - if (sizeof(lua_Integer) >= sizeof(long) || sizeof(lua_Number) <= sizeof(lua_Integer)) { - return (long)luaL_checkinteger(L, idx); - } else { - return (long)luaL_checknumber(L, idx); - } -#else - return (long)luaL_checknumber(L, idx); -#endif -} - -long luaT_tolong(lua_State *L, int idx) -{ -#if LUA_VERSION_NUM == 503 - if (sizeof(lua_Integer) >= sizeof(long) || sizeof(lua_Number) <= sizeof(lua_Integer)) { - return (long)lua_tointeger(L, idx); - } else { - return (long)lua_tonumber(L, idx); - } -#else - return (long)lua_tonumber(L, idx); -#endif -} - -void luaT_pushinteger(lua_State *L, ptrdiff_t n) -{ -#if LUA_VERSION_NUM >= 503 - /* Only push the value as an integer if it fits in lua_Integer, - or if the lua_Number representation will be even worse */ - if (sizeof(lua_Integer) >= sizeof(ptrdiff_t) || sizeof(lua_Number) <= sizeof(lua_Integer)) { - lua_pushinteger(L, n); - } else { - lua_pushnumber(L, (lua_Number)n); - } -#else - lua_pushnumber(L, (lua_Number)n); -#endif -} - -ptrdiff_t luaT_checkinteger(lua_State *L, int idx) -{ -#if LUA_VERSION_NUM >= 503 - if (sizeof(lua_Integer) >= sizeof(ptrdiff_t) || sizeof(lua_Number) <= sizeof(lua_Integer)) { - return (ptrdiff_t)luaL_checkinteger(L, idx); - } else { - return (ptrdiff_t)luaL_checknumber(L, idx); - } -#else - return (ptrdiff_t)luaL_checknumber(L, idx); -#endif -} - -void *luaT_getfieldcheckudata(lua_State *L, int ud, const char *field, const char *tname) -{ - void *p; - lua_getfield(L, ud, field); - if(lua_isnil(L, -1)) - luaL_error(L, "bad argument #%d (field %s does not exist)", ud, field); - p = luaT_toudata(L, -1, tname); - if(!p) - luaL_error(L, "bad argument #%d (field %s is not a %s)", ud, field, tname); - return p; -} - -void *luaT_getfieldchecklightudata(lua_State *L, int ud, const char *field) -{ - void *p; - lua_getfield(L, ud, field); - if(lua_isnil(L, -1)) - luaL_error(L, "bad argument #%d (field %s does not exist)", ud, field); - - if(!lua_islightuserdata(L, -1)) - luaL_error(L, "bad argument #%d (field %s is not a light userdata)", ud, field); - - p = lua_touserdata(L, -1); - - return p; -} - -double luaT_getfieldchecknumber(lua_State *L, int ud, const char *field) -{ - lua_getfield(L, ud, field); - if(lua_isnil(L, -1)) - luaL_error(L, "bad argument #%d (field %s does not exist)", ud, field); - if(!lua_isnumber(L, -1)) - luaL_error(L, "bad argument #%d (field %s is not a number)", ud, field); - return 
lua_tonumber(L, -1); -} - -int luaT_getfieldcheckint(lua_State *L, int ud, const char *field) -{ - lua_getfield(L, ud, field); - if(lua_isnil(L, -1)) - luaL_error(L, "bad argument #%d (field %s does not exist)", ud, field); - if(!lua_isnumber(L, -1)) - luaL_error(L, "bad argument #%d (field %s is not a number)", ud, field); - return (int)lua_tonumber(L, -1); -} - -const char* luaT_getfieldcheckstring(lua_State *L, int ud, const char *field) -{ - lua_getfield(L, ud, field); - if(lua_isnil(L, -1)) - luaL_error(L, "bad argument #%d (field %s does not exist)", ud, field); - if(!lua_isstring(L, -1)) - luaL_error(L, "bad argument #%d (field %s is not a string)", ud, field); - return lua_tostring(L, -1); -} - -int luaT_getfieldcheckboolean(lua_State *L, int ud, const char *field) -{ - lua_getfield(L, ud, field); - if(lua_isnil(L, -1)) - luaL_error(L, "bad argument #%d (field %s does not exist)", ud, field); - if(!lua_isboolean(L, -1)) - luaL_error(L, "bad argument #%d (field %s is not a boolean)", ud, field); - return lua_toboolean(L, -1); -} - -void luaT_getfieldchecktable(lua_State *L, int ud, const char *field) -{ - lua_getfield(L, ud, field); - if(lua_isnil(L, -1)) - luaL_error(L, "bad argument #%d (field %s does not exist)", ud, field); - if(!lua_istable(L, -1)) - luaL_error(L, "bad argument #%d (field %s is not a table)", ud, field); -} - -/**** type checks as in luaL ****/ -int luaT_typerror(lua_State *L, int ud, const char *tname) -{ - const char *msg; - const char *tnameud = luaT_typename(L, ud); - - if(!tnameud) - tnameud = lua_typename(L, ud); - - msg = lua_pushfstring(L, "%s expected, got %s", - tname, - (tnameud ? tnameud : "unknown object")); - - return luaL_argerror(L, ud, msg); -} - -int luaT_checkboolean(lua_State *L, int ud) -{ - if(!lua_isboolean(L, ud)) - luaT_typerror(L, ud, lua_typename(L, LUA_TBOOLEAN)); - return lua_toboolean(L, ud); -} - -int luaT_optboolean(lua_State *L, int ud, int def) -{ - if(lua_isnoneornil(L,ud)) - return def; - - return luaT_checkboolean(L, ud); -} - -void luaT_registeratname(lua_State *L, const struct luaL_Reg *methods, const char *name) -{ - int idx = lua_gettop(L); - - luaL_checktype(L, idx, LUA_TTABLE); - lua_pushstring(L, name); - lua_rawget(L, idx); - - if(lua_isnil(L, -1)) - { - lua_pop(L, 1); - lua_pushstring(L, name); - lua_newtable(L); - lua_rawset(L, idx); - - lua_pushstring(L, name); - lua_rawget(L, idx); - } - - luaT_setfuncs(L, methods, 0); - lua_pop(L, 1); -} - - -/* returns the name of the class itself (sans nesting) */ -const char* luaT_classrootname(const char *tname) -{ - int idx; - int sz = strlen(tname); - - for(idx = sz-1; idx >= 0 ; idx--) - { - if(tname[idx] == '.') - return tname+idx+1; - } - return tname; -} - -/* parent_name must be a buffer at least as big as tname. - * If class has a parent, returns true; and, sets - * parent name to that of full parent hierarchy (e.g. - * given class `A.b.c`, sets parent_name to `A.b`) - */ -int luaT_fullparentname(const char *tname, char *parent_name) -{ - int sz = strlen(tname); - int idx; - for(idx = sz-1; idx > 0 ; idx--) - if(tname[idx] == '.' || tname[idx] == '\0') break; - - if (idx > 0) strncpy(parent_name, tname, idx); - parent_name[idx] = '\0'; - return tname[idx] == '.'; -} - -/* alias for ensuring backwards compatibility; - * use of luaT_fullparentname is preferred. - */ -int luaT_classmodulename(const char *tname, char *parent_name) -{ - return luaT_fullparentname(tname, parent_name); -} - -/* parent_name must be a buffer at least as big as tname. 
- * If class has a parent, returns true; and, sets - * parent name to that of outermost parent (e.g. - * given class `A.b.c`, sets parent_name to `A`) - */ -int luaT_outerparentname(const char *tname, char *parent_name) -{ - char chars[] = {'.', '\0'}; - size_t idx; - idx = strcspn(tname, chars); - strncpy(parent_name, tname, idx); - parent_name[idx] = '\0'; - return tname[idx] == '.'; -} - -/* parent_name must be a buffer at least as big as tname. - * If class has a parent, returns true; and, sets parent - * name to that of innermost parent (e.g. given class - * `A.b.c`, sets parent_name to `b`). In the comments - * below, the inner parent name is abbreviated as IPN. - */ -int luaT_innerparentname(const char *tname, char *parent_name) -{ - int sz = strlen(tname); - int tail, head; - for(tail = sz-1; tail >= 0 ; tail--) // tail points to - if(tname[tail] == '.') break; // just past IPN - - if (tail == 0) return 0; - - for(head = tail-1; head >= 0; head--) // head points to - if(tname[head] == '.') break; // just before IPN - - head += 1; // update head to start of IPN - tail -= head; // update tail to strlen(IPN) - strncpy(parent_name, tname+head, tail); - parent_name[tail] = '\0'; - return 1; -} - -/* Method for pushing a class's immediate parent to the - * stack (e.g. given class `A.b.c`, pushes `b` to the stack) - */ -void luaT_getinnerparent(lua_State *L, const char *tname) -{ - /* Local variables */ - char term[256]; - char chars[] = {'.', '\0'}; - const char *tname_full = tname; // used for error case - - /* Get outermost table from Lua */ - int n = strcspn(tname, chars); - strncpy(term, tname, n); - term[n] = '\0'; - lua_getglobal(L, term); - tname += n + 1; - - /* Traverse hierarchy down to last table*/ - n = strcspn(tname, chars); - while(n < strlen(tname)) - { - /* Check that current parent is a table (i.e. a module) */ - if(!lua_istable(L, -1)){ - strncpy(term, tname_full, tname - tname_full - 1); - term[tname - tname_full] = '\0'; - luaL_error(L, "while creating metatable %s: bad argument #1 (%s is an invalid module name)", tname_full, term); - } - strncpy(term, tname, n); - term[n] = '\0'; - lua_getfield(L, -1, term); - lua_remove(L, -2); - tname += n + 1; - n = strcspn(tname, chars); // prepare for next - } - - /* Check that resulting parent is a table (i.e. 
a module) */ - if(!lua_istable(L, -1)){ - strncpy(term, tname_full, tname - tname_full - 1); - term[tname - tname_full] = '\0'; - luaL_error(L, "while creating metatable %s: bad argument #1 (%s is an invalid module name)", tname_full, term); - } -} - - -int luaT_lua_newmetatable(lua_State *L) -{ - /* Local Variables */ - const char* tname = luaL_checkstring(L, 1); - char parent_name[256]; - int is_in_module = 0; - - /* Argument Checking */ - lua_settop(L, 6); - luaL_argcheck(L, lua_isnoneornil(L, 2) || lua_isstring(L, 2), 2, "parent class name or nil expected"); - luaL_argcheck(L, lua_isnoneornil(L, 3) || lua_isfunction(L, 3), 3, "constructor function or nil expected"); - luaL_argcheck(L, lua_isnoneornil(L, 4) || lua_isfunction(L, 4), 4, "destructor function or nil expected"); - luaL_argcheck(L, lua_isnoneornil(L, 5) || lua_isfunction(L, 5), 5, "factory function or nil expected"); - luaL_argcheck(L, lua_isnoneornil(L, 6) || lua_istable(L, 6), 6, "module table or nil expected"); - - /* Push immediate parent module to stack */ - if(lua_isnoneornil(L, 6)) { - lua_pop(L, 1); /* remove the nil */ - is_in_module = luaT_fullparentname(tname, parent_name); - if (is_in_module) - luaT_getinnerparent(L, tname); - else - lua_pushglobaltable(L); - } - - if(!lua_istable(L, -1)) - luaL_error(L, "while creating metatable %s: bad argument #1 (%s is an invalid module name)", tname, parent_name); - - /* we first create the new metaclass if we have to */ - if(!luaT_pushmetatable(L, tname)) - { - /* create the metatable */ - lua_newtable(L); - - /* registry[name] = metatable */ - lua_pushvalue(L, -1); - lua_setfield(L, LUA_REGISTRYINDEX, tname); - - /* registry[metatable] = tname */ - lua_pushvalue(L, -1); - lua_pushstring(L, tname); - lua_rawset(L, LUA_REGISTRYINDEX); - - /* __index handling */ - lua_pushcfunction(L, luaT_mt__index); - lua_setfield(L, -2, "__index"); - - /* __newindex handling */ - lua_pushcfunction(L, luaT_mt__newindex); - lua_setfield(L, -2, "__newindex"); - - /* __typename contains the typename */ - lua_pushstring(L, tname); - lua_setfield(L, -2, "__typename"); - - /* __metatable is self */ - lua_pushvalue(L, -1); - lua_setfield(L, -2, "__metatable"); - - /* by default, __version equals 1 */ - lua_pushnumber(L, 1); - lua_setfield(L, -2, "__version"); - - /* assign default operator functions */ - lua_pushcfunction(L, luaT_mt__tostring); - lua_setfield(L, -2, "__tostring"); - - lua_pushcfunction(L, luaT_mt__add); - lua_setfield(L, -2, "__add"); - - lua_pushcfunction(L, luaT_mt__sub); - lua_setfield(L, -2, "__sub"); - - lua_pushcfunction(L, luaT_mt__mul); - lua_setfield(L, -2, "__mul"); - - lua_pushcfunction(L, luaT_mt__div); - lua_setfield(L, -2, "__div"); - - lua_pushcfunction(L, luaT_mt__mod); - lua_setfield(L, -2, "__mod"); - - lua_pushcfunction(L, luaT_mt__pow); - lua_setfield(L, -2, "__pow"); - - lua_pushcfunction(L, luaT_mt__unm); - lua_setfield(L, -2, "__unm"); - - lua_pushcfunction(L, luaT_mt__concat); - lua_setfield(L, -2, "__concat"); - - lua_pushcfunction(L, luaT_mt__len); - lua_setfield(L, -2, "__len"); - - lua_pushcfunction(L, luaT_mt__eq); - lua_setfield(L, -2, "__eq"); - - lua_pushcfunction(L, luaT_mt__lt); - lua_setfield(L, -2, "__lt"); - - lua_pushcfunction(L, luaT_mt__le); - lua_setfield(L, -2, "__le"); - - lua_pushcfunction(L, luaT_mt__call); - lua_setfield(L, -2, "__call"); - } - - /* we assign the parent class if necessary */ - if(!lua_isnoneornil(L, 2)) - { - if(lua_getmetatable(L, -1)) - luaL_error(L, "class %s has been already assigned a parent class\n", tname); - 
else - { - const char* parent_tname = luaL_checkstring(L, 2); - if(!luaT_pushmetatable(L, parent_tname)) - luaL_error(L, "bad argument #2 (invalid parent class name %s)", parent_tname); - lua_setmetatable(L, -2); - } - } - - /* register the destructor function */ - if(!lua_isnoneornil(L, 4)) - { - /* does it exists already? */ - lua_pushstring(L, "__gc"); - lua_rawget(L, -2); - - if(lua_isnil(L, -1)) - { - lua_pop(L, 1); /* pop nil */ - lua_pushstring(L, "__gc"); - lua_pushvalue(L, 4); - lua_rawset(L, -3); - } - else - luaL_error(L, "%s has been already assigned a destructor", tname); - } - - /* register the factory function */ - if(!lua_isnoneornil(L, 5)) - { - /* does it exists already? */ - lua_pushstring(L, "__factory"); - lua_rawget(L, -2); - - if(lua_isnil(L, -1)) - { - lua_pop(L, 1); /* pop nil */ - lua_pushstring(L, "__factory"); - lua_pushvalue(L, 5); - lua_rawset(L, -3); - } - else - luaL_error(L, "%s has been already assigned a factory", tname); - } - - /******** Constructor table and metatable ********/ - lua_pushstring(L, "__constructor"); - lua_rawget(L, -2); - if(lua_isnil(L, -1)) - { - lua_pop(L, 1); /* pop nil */ - lua_newtable(L); /* fancy table */ - lua_newtable(L); /* fancy metatable */ - - lua_pushvalue(L, -3); /* metatable */ - lua_setfield(L, -2, "__index"); /* so we can get the methods */ - - lua_pushcfunction(L, luaT_cmt__newindex); - lua_setfield(L, -2, "__newindex"); /* so we add new methods */ - - lua_pushcfunction(L, luaT_cmt__call); - lua_setfield(L, -2, "__call"); /* so we can create, we are here for only that */ - - lua_pushvalue(L, -3); - lua_setfield(L, -2, "__metatable"); /* redirect to metatable with methods */ - - lua_setmetatable(L, -2); /* constructor metatable is ... this fancy metatable */ - - /* set metatable[__constructor] = constructor-metatable */ - lua_pushstring(L, "__constructor"); - lua_pushvalue(L, -2); - lua_rawset(L, -4); - } - - /* register the constructor function */ - if(!lua_isnoneornil(L, 3)) - { - /* get constructor metatable */ - lua_getmetatable(L, -1); - - /* does it exists already? 
*/ - lua_pushstring(L, "__new"); - lua_rawget(L, -2); - - if(lua_isnil(L, -1)) - { - lua_pop(L, 1); /* pop nil */ - lua_pushstring(L, "__new"); - lua_pushvalue(L, 3); - lua_rawset(L, -3); - - /* set "new" in the metatable too */ - lua_pushstring(L, "new"); - lua_pushvalue(L, 3); - lua_rawset(L, -5); - } - else - luaL_error(L, "%s has been already assigned a constructor", tname); - - /* pop constructor metatable */ - lua_pop(L, 1); - } - - /* module.name = constructor metatable */ - lua_setfield(L, 6, luaT_classrootname(tname)); - - return 1; /* returns the metatable */ -} - -/* Lua only utility functions */ - -/* add any custom type, provided the object has a metatable */ -int luaT_lua_metatype(lua_State *L) -{ - if( (lua_gettop(L) != 2) && (lua_gettop(L) != 3) ) - luaL_error(L, "expecting: string table [ctype]"); - - luaL_checkstring(L, 1); - luaL_checktype(L, 2, LUA_TTABLE); - - if(lua_gettop(L) == 3) - { - if(!luaT_cdataname(L, 3, lua_tostring(L, 1))) - luaL_error(L, "could not register cdata type -- missing ffi library?"); - } - - /* registry[name] = metatable */ - lua_pushvalue(L, 1); - lua_pushvalue(L, 2); - lua_rawset(L, LUA_REGISTRYINDEX); - - /* registry[metatable] = tname */ - lua_pushvalue(L, 2); - lua_pushvalue(L, 1); - lua_rawset(L, LUA_REGISTRYINDEX); - - return 0; -} - -/* return a userdata from a C pointer */ -/* you are better to know what you are doing */ -int luaT_lua_pushudata(lua_State *L) -{ - void *udata = NULL; - const char *tname = luaL_checkstring(L, 2); - - if(lua_type(L, 1) == 10) - udata = *((void**)lua_topointer(L, 1)); - else if(luaT_iscdata(L, 1)) - udata = ((void**)lua_topointer(L, 1))[4]; - else if(lua_isnumber(L, 1)) - udata = (void*)(uintptr_t)lua_tonumber(L, 1); - else - luaL_argerror(L, 1, "expecting number or cdata"); - - luaT_pushudata(L, udata, tname); - - return 1; -} - -int luaT_lua_factory(lua_State *L) -{ - const char* tname = luaL_checkstring(L, 1); - if(luaT_pushmetatable(L, tname) && !lua_isnil(L, -1)) - { - lua_pushstring(L, "__factory"); - lua_rawget(L, -2); - } - else - { - lua_pushnil(L); - } - return 1; -} - -int luaT_lua_getconstructortable(lua_State *L) -{ - const char* tname = luaL_checkstring(L, 1); - if(luaT_pushmetatable(L, tname)) - { - lua_pushstring(L, "__constructor"); - lua_rawget(L, -2); - return 1; - } - return 0; -} - - -int luaT_lua_typename(lua_State *L) -{ - const char* tname = NULL; - luaL_checkany(L, 1); - if((tname = luaT_typename(L, 1))) - { - lua_pushstring(L, tname); - return 1; - } - return 0; -} - -int luaT_lua_isequal(lua_State *L) -{ - if(lua_isuserdata(L, 1) && lua_isuserdata(L, 2)) - { - void **u1, **u2; - luaL_argcheck(L, luaT_typename(L, 1), 1, "Torch object expected"); - luaL_argcheck(L, luaT_typename(L, 2), 2, "Torch object expected"); - - u1 = lua_touserdata(L, 1); - u2 = lua_touserdata(L, 2); - if(*u1 == *u2) - lua_pushboolean(L, 1); - else - lua_pushboolean(L, 0); - } - else if(lua_istable(L, 1) && lua_istable(L, 2)) - lua_pushboolean(L, lua_rawequal(L, 1, 2)); - else - lua_pushboolean(L, 0); - return 1; -} - -static void luaT_pushpointer(lua_State *L, const void *ptr) -{ -#if LUA_VERSION_NUM >= 503 - // this assumes that lua_Integer is a ptrdiff_t - if (sizeof(void *) > sizeof(lua_Integer)) - luaL_error(L, "Pointer value can't be represented as a Lua integer (an overflow would occur)"); - lua_pushinteger(L, (uintptr_t)(ptr)); -#else - // 2^53 - this assumes that lua_Number is a double - if ((uintptr_t)ptr > 9007199254740992LLU) - luaL_error(L, "Pointer value can't be represented as a Lua number (an 
overflow would occur)"); - lua_pushnumber(L, (uintptr_t)(ptr)); -#endif -} - -int luaT_lua_pointer(lua_State *L) -{ - if(lua_type(L, 1) == 10) /* luajit cdata */ - { - /* we want the pointer holded by cdata */ - /* not the pointer on the cdata object */ - const void* ptr = *((void**)lua_topointer(L, 1)); - luaT_pushpointer(L, ptr); - return 1; - } - else if (luaT_iscdata(L, 1)) /* luaffi cdata */ - { - void** ptr = (void**)lua_touserdata(L, 1); - luaT_pushpointer(L, ptr[4]); - return 1; - } - else if(lua_isuserdata(L, 1)) - { - void **ptr; - luaL_argcheck(L, luaT_typename(L, 1), 1, "Torch object expected"); - ptr = lua_touserdata(L, 1); - luaT_pushpointer(L, *ptr); - return 1; - } - else if(lua_istable(L, 1) || lua_isthread(L, 1) || lua_isfunction(L, 1)) - { - const void* ptr = lua_topointer(L, 1); - luaT_pushpointer(L, ptr); - return 1; - } - else if(lua_isstring(L, 1)) - { - const char* ptr = lua_tostring(L, 1); - luaT_pushpointer(L, ptr); - return 1; - } - else - luaL_error(L, "Torch object, table, thread, cdata or function expected"); - - return 0; -} - -int luaT_lua_setenv(lua_State *L) -{ - if(!lua_isfunction(L, 1) && !lua_isuserdata(L, 1)) - luaL_typerror(L, 1, "function or userdata"); - luaL_checktype(L, 2, LUA_TTABLE); - lua_setuservalue(L, 1); - return 0; -} - -int luaT_lua_getenv(lua_State *L) -{ - if(!lua_isfunction(L, 1) && !lua_isuserdata(L, 1)) - luaL_typerror(L, 1, "function or userdata"); - lua_getuservalue(L, 1); - if (lua_isnil(L, -1)) - lua_newtable(L); - return 1; -} - -int luaT_lua_getmetatable(lua_State *L) -{ - const char *tname = luaL_checkstring(L, 1); - if(luaT_pushmetatable(L, tname)) - return 1; - return 0; -} - -int luaT_lua_version(lua_State *L) -{ - luaL_checkany(L, 1); - - if(luaT_iscdata(L, 1)) - { - const char *tname = luaT_cdataname(L, 1, NULL); - if(tname) - { - luaT_pushmetatable(L, tname); - lua_pushstring(L, "__version"); - lua_rawget(L, -2); - return 1; - } - return 0; - } - else if(lua_getmetatable(L, 1)) - { - lua_pushstring(L, "__version"); - lua_rawget(L, -2); - return 1; - } - return 0; -} - -int luaT_lua_setmetatable(lua_State *L) -{ - const char *tname = luaL_checkstring(L, 2); - luaL_checktype(L, 1, LUA_TTABLE); - - if(!luaT_pushmetatable(L, tname)) - luaL_error(L, "unknown typename %s\n", tname); - lua_setmetatable(L, 1); - - return 1; -} - -/* metatable operator methods */ -static int luaT_mt__index(lua_State *L) -{ - if(!lua_getmetatable(L, 1)) - luaL_error(L, "critical internal indexing error: no metatable found"); - - if(!lua_istable(L, -1)) - luaL_error(L, "critical internal indexing error: not a metatable"); - - /* test for __index__ method first */ - lua_getfield(L, -1, "__index__"); - if(!lua_isnil(L, -1)) - { - int result; - - if(!lua_isfunction(L, -1)) - luaL_error(L, "critical internal indexing error: __index__ is not a function"); - - lua_pushvalue(L, 1); - lua_pushvalue(L, 2); - - lua_call(L, 2, LUA_MULTRET); /* DEBUG: risque: faut vraiment retourner 1 ou 2 valeurs... */ - - result = lua_toboolean(L, -1); - lua_pop(L, 1); - - if(result) - return 1; - - /* on the stack: 1. the object 2. the value 3. 
the metatable */ - /* apparently, __index wants only one element returned */ - /* return lua_gettop(L)-3; */ - - } - else - lua_pop(L, 1); /* remove nil __index__ on the stack */ - - lua_pushvalue(L, 2); - lua_gettable(L, -2); - - return 1; -} - -static int luaT_mt__newindex(lua_State *L) -{ - if(!lua_getmetatable(L, 1)) - luaL_error(L, "critical internal indexing error: no metatable found"); - - if(!lua_istable(L, -1)) - luaL_error(L, "critical internal indexing error: not a metatable"); - - /* test for __newindex__ method first */ - lua_getfield(L, -1, "__newindex__"); - if(!lua_isnil(L, -1)) - { - int result; - - if(!lua_isfunction(L, -1)) - luaL_error(L, "critical internal indexing error: __newindex__ is not a function"); - - lua_pushvalue(L, 1); - lua_pushvalue(L, 2); - lua_pushvalue(L, 3); - - lua_call(L, 3, 1); /* DEBUG: risque: faut vraiment retourner qqch */ - - result = lua_toboolean(L, -1); - lua_pop(L, 1); - - if(result) - return 0; - } - else - lua_pop(L, 1); /* remove nil __newindex__ on the stack */ - - lua_pop(L, 1); /* pop the metatable */ - if(lua_istable(L, 1)) - lua_rawset(L, 1); - else - luaL_error(L, "the class %s cannot be indexed", luaT_typename(L, 1)); - - return 0; -} - - -#define MT_UNI_OPERATOR_GET_HANDLER(NAME) \ - if(!lua_getmetatable(L, 1)) \ - luaL_error(L, "internal error in __" #NAME ": no metatable"); - -#define MT_BIN_OPERATOR_GET_HANDLER(NAME) \ - if(!lua_getmetatable(L, 1) && !lua_getmetatable(L,2) ) \ - luaL_error(L, "internal error in __" #NAME \ - ": no metatable in both operands"); - -#define MT_DECLARE_OPERATOR_BODY(NAME, NIL_BEHAVIOR) \ - \ - lua_getfield(L, -1, "__" #NAME "__"); \ - if(lua_isnil(L, -1)) \ - { \ - NIL_BEHAVIOR; \ - } \ - else \ - { \ - if(lua_isfunction(L, -1)) \ - { \ - lua_insert(L, 1); /* insert function */ \ - lua_pop(L, 1); /* remove metatable */ \ - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); \ - /* we return the result of the call */ \ - return lua_gettop(L); \ - } \ - /* we return the thing the user left in __tostring__ */ \ - } \ - return 0; \ - -/* note: check dans metatable pour ca, donc necessaire */ -#define MT_DECLARE_OPERATOR(NAME, NIL_BEHAVIOR) \ - int luaT_mt__##NAME(lua_State *L) \ - { \ - MT_UNI_OPERATOR_GET_HANDLER(NAME) \ - MT_DECLARE_OPERATOR_BODY(NAME,NIL_BEHAVIOR) \ - } - -#define MT_DECLARE_BIN_OPERATOR(NAME, NIL_BEHAVIOR) \ - int luaT_mt__##NAME(lua_State *L) \ - { \ - MT_BIN_OPERATOR_GET_HANDLER(NAME) \ - MT_DECLARE_OPERATOR_BODY(NAME,NIL_BEHAVIOR) \ - } - - -#define BIN_OPERATOR_ERROR(NAME) \ - luaL_error(L, "both %s and %s have no " #NAME " operator", \ - luaT_typename(L, 1), luaT_typename(L,2)) - -MT_DECLARE_BIN_OPERATOR(add, BIN_OPERATOR_ERROR(addition) ) -MT_DECLARE_BIN_OPERATOR(sub, BIN_OPERATOR_ERROR(substraction) ) -MT_DECLARE_BIN_OPERATOR(mul, BIN_OPERATOR_ERROR(multiplication) ) -MT_DECLARE_BIN_OPERATOR(div, BIN_OPERATOR_ERROR(division) ) -MT_DECLARE_BIN_OPERATOR(mod, BIN_OPERATOR_ERROR(modulo) ) -MT_DECLARE_BIN_OPERATOR(pow, BIN_OPERATOR_ERROR(power) ) -MT_DECLARE_BIN_OPERATOR(concat, BIN_OPERATOR_ERROR(concat) ) -MT_DECLARE_BIN_OPERATOR(eq, - lua_settop(L, 2); - lua_pushcfunction(L, luaT_lua_isequal); - lua_insert(L, 1); - lua_call(L, 2, 1); - return 1;) -MT_DECLARE_BIN_OPERATOR(lt, BIN_OPERATOR_ERROR(less-than) ) -MT_DECLARE_BIN_OPERATOR(le, BIN_OPERATOR_ERROR(less-equal) ) - -MT_DECLARE_OPERATOR(tostring, - lua_pushstring(L, luaT_typename(L, 1)); - return 1;) -MT_DECLARE_OPERATOR(call, luaL_error(L, "%s has no call operator", luaT_typename(L, 1))) -MT_DECLARE_OPERATOR(unm, luaL_error(L, "%s 
has no negation operator", luaT_typename(L, 1))) -MT_DECLARE_OPERATOR(len, luaL_error(L, "%s has no length operator", luaT_typename(L, 1))) - - -/* constructor metatable methods */ -int luaT_cmt__call(lua_State *L) -{ - if(!lua_istable(L, 1)) - luaL_error(L, "internal error in __call: not a constructor table"); - - if(!lua_getmetatable(L, 1)) - luaL_error(L, "internal error in __call: no metatable available"); - - lua_pushstring(L, "__new"); - lua_rawget(L, -2); - - if(lua_isnil(L, -1)) - luaL_error(L, "no constructor available"); - - lua_remove(L, 1); /* remove constructor atable */ - lua_insert(L, 1); /* insert constructor */ - lua_pop(L, 1); /* remove fancy metatable */ - - lua_call(L, lua_gettop(L)-1, LUA_MULTRET); - return lua_gettop(L); -} - -int luaT_cmt__newindex(lua_State *L) -{ - if(!lua_istable(L, 1)) - luaL_error(L, "internal error in __newindex: not a constructor table"); - - if(!lua_getmetatable(L, 1)) - luaL_error(L, "internal error in __newindex: no metatable available"); - - lua_pushstring(L, "__metatable"); - lua_rawget(L, -2); - - if(!lua_istable(L, -1)) - luaL_error(L, "internal error in __newindex: no metaclass available"); - - lua_insert(L, 2); - lua_pop(L, 1); /* remove the metatable over the constructor table */ - - lua_rawset(L, -3); - - return 0; -} - -/******************** deprecated functions ********************/ -int luaT_pushmetaclass(lua_State *L, const char *tname) -{ - return luaT_pushmetatable(L, tname); -} - -const char* luaT_id(lua_State *L, int ud) -{ - return luaT_typename(L, ud); -} - -const char* luaT_id2typename(lua_State *L, const char *id) -{ - return id; -} - -const char* luaT_typename2id(lua_State *L, const char *tname) -{ - return luaT_typenameid(L, tname); -} - -int luaT_getmetaclass(lua_State *L, int index) -{ - return lua_getmetatable(L, index); -} - -const char* luaT_checktypename2id(lua_State *L, const char *tname) -{ - const char* id = luaT_typenameid(L, tname); - if(!id) - luaL_error(L, "unknown class <%s>", tname); - return id; -} - -void luaT_registeratid(lua_State *L, const struct luaL_Reg *methods, const char *id) -{ - luaT_registeratname(L, methods, id); -} - -/**************************************************************/ diff --git a/contrib/lua-torch/torch7/lib/luaT/luaT.h b/contrib/lua-torch/torch7/lib/luaT/luaT.h deleted file mode 100644 index 2479a1dc1c..0000000000 --- a/contrib/lua-torch/torch7/lib/luaT/luaT.h +++ /dev/null @@ -1,135 +0,0 @@ -#ifndef LUAT_UTILS_INC -#define LUAT_UTILS_INC - -#ifdef __cplusplus -extern "C" { -#endif -#include -#include -#ifdef __cplusplus -} -#endif - -#ifndef LUA_EXTERNC -# ifdef __cplusplus -# define LUA_EXTERNC extern "C" -# else -# define LUA_EXTERNC extern -# endif -#endif - -#if (defined(_MSC_VER) || defined(__MINGW32__)) -# define DLL_EXPORT __declspec(dllexport) -# define DLL_IMPORT __declspec(dllimport) -# ifdef luaT_EXPORTS -# define LUAT_API LUA_EXTERNC DLL_EXPORT -# else -# define LUAT_API LUA_EXTERNC DLL_IMPORT -# endif -#else -# define DLL_EXPORT -# define DLL_IMPORT -# define LUAT_API LUA_EXTERNC -#endif - -#if LUA_VERSION_NUM == 501 -# define lua_pushglobaltable(L) lua_pushvalue(L, LUA_GLOBALSINDEX) -# define lua_setuservalue lua_setfenv -# define lua_getuservalue lua_getfenv -#else -# define lua_objlen lua_rawlen -static int luaL_typerror(lua_State *L, int narg, const char *tname) -{ - return luaL_error(L, "%s expected, got %s", tname, luaL_typename(L, narg)); -} -#endif - - -/* C functions */ - -LUAT_API void* luaT_alloc(lua_State *L, ptrdiff_t size); -LUAT_API void* 
luaT_realloc(lua_State *L, void *ptr, ptrdiff_t size); -LUAT_API void luaT_free(lua_State *L, void *ptr); - -LUAT_API void luaT_setfuncs(lua_State *L, const luaL_Reg *l, int nup); - -LUAT_API const char* luaT_newlocalmetatable(lua_State *L, const char *tname, const char *parent_tname, - lua_CFunction constructor, lua_CFunction destructor, lua_CFunction factory, int moduleidx); - -LUAT_API const char* luaT_newmetatable(lua_State *L, const char *tname, const char *parenttname, - lua_CFunction constructor, lua_CFunction destructor, lua_CFunction factory); - -LUAT_API int luaT_pushmetatable(lua_State *L, const char *tname); - -LUAT_API const char* luaT_typenameid(lua_State *L, const char *tname); -LUAT_API const char* luaT_typename(lua_State *L, int ud); - -LUAT_API void luaT_pushudata(lua_State *L, void *udata, const char *tname); -LUAT_API void *luaT_toudata(lua_State *L, int ud, const char *tname); -LUAT_API int luaT_isudata(lua_State *L, int ud, const char *tname); -LUAT_API void *luaT_checkudata(lua_State *L, int ud, const char *tname); - -LUAT_API void luaT_pushlong(lua_State *L, long n); -LUAT_API long luaT_checklong(lua_State *L, int idx); -LUAT_API long luaT_tolong(lua_State *L, int idx); - -LUAT_API void luaT_pushinteger(lua_State *L, ptrdiff_t n); -LUAT_API ptrdiff_t luaT_checkinteger(lua_State *L, int idx); - -LUAT_API void *luaT_getfieldcheckudata(lua_State *L, int ud, const char *field, const char *tname); -LUAT_API void *luaT_getfieldchecklightudata(lua_State *L, int ud, const char *field); -LUAT_API double luaT_getfieldchecknumber(lua_State *L, int ud, const char *field); -LUAT_API int luaT_getfieldcheckint(lua_State *L, int ud, const char *field); -LUAT_API const char* luaT_getfieldcheckstring(lua_State *L, int ud, const char *field); -LUAT_API int luaT_getfieldcheckboolean(lua_State *L, int ud, const char *field); -LUAT_API void luaT_getfieldchecktable(lua_State *L, int ud, const char *field); - -LUAT_API int luaT_typerror(lua_State *L, int ud, const char *tname); - -LUAT_API int luaT_checkboolean(lua_State *L, int ud); -LUAT_API int luaT_optboolean(lua_State *L, int ud, int def); - -LUAT_API void luaT_registeratname(lua_State *L, const struct luaL_Reg *methods, const char *name); - -/* utility functions */ -LUAT_API const char *luaT_classrootname(const char *tname); -LUAT_API int luaT_classmodulename(const char *tname, char *module_name); - -/* debug */ -LUAT_API void luaT_stackdump(lua_State *L); - -/* Lua functions */ -LUAT_API int luaT_lua_newmetatable(lua_State *L); -LUAT_API int luaT_lua_factory(lua_State *L); -LUAT_API int luaT_lua_getconstructortable(lua_State *L); -LUAT_API int luaT_lua_typename(lua_State *L); -LUAT_API int luaT_lua_isequal(lua_State *L); -LUAT_API int luaT_lua_pointer(lua_State *L); -LUAT_API int luaT_lua_setenv(lua_State *L); -LUAT_API int luaT_lua_getenv(lua_State *L); -LUAT_API int luaT_lua_getmetatable(lua_State *L); -LUAT_API int luaT_lua_version(lua_State *L); -LUAT_API int luaT_lua_setmetatable(lua_State *L); -LUAT_API int luaT_lua_metatype(lua_State *L); -LUAT_API int luaT_lua_pushudata(lua_State *L); - -/* deprecated functions */ -/* ids have been replaced by string names to identify classes */ -/* comments show what function (that you should use) they call now */ -#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) -#define LUAT_DEPRECATED __attribute__((__deprecated__)) -#elif (defined(_MSC_VER) || defined(__MINGW32__)) -#define LUAT_DEPRECATED __declspec(deprecated) -#else -#define LUAT_DEPRECATED -#endif - -LUAT_API 
LUAT_DEPRECATED int luaT_pushmetaclass(lua_State *L, const char *tname); /* same as luaT_pushmetatable */ -LUAT_API LUAT_DEPRECATED const char* luaT_id(lua_State *L, int ud); /* same as luaT_typename */ -LUAT_API LUAT_DEPRECATED const char* luaT_id2typename(lua_State *L, const char *id); /* same as luaT_typenameid */ -LUAT_API LUAT_DEPRECATED const char* luaT_typename2id(lua_State *L, const char*); /* same as luaT_typenameid */ -LUAT_API LUAT_DEPRECATED int luaT_getmetaclass(lua_State *L, int index); /* same as luaT_getmetatable */ -LUAT_API LUAT_DEPRECATED const char* luaT_checktypename2id(lua_State *L, const char *tname); /* same as luaT_typenameid */ -LUAT_API LUAT_DEPRECATED void luaT_registeratid(lua_State *L, const struct luaL_Reg *methods, const char *id); /* same as luaT_registeratname */ - -#endif diff --git a/contrib/lua-torch/torch7/lib/luaT/luaTConfig.cmake.in b/contrib/lua-torch/torch7/lib/luaT/luaTConfig.cmake.in deleted file mode 100644 index bfb20b87a4..0000000000 --- a/contrib/lua-torch/torch7/lib/luaT/luaTConfig.cmake.in +++ /dev/null @@ -1,9 +0,0 @@ -# Find the luaT includes and library -# -# LUAT_INCLUDE_DIR -- where to find the includes -# LUAT_LIBRARIES -- list of libraries to link against -# LUAT_FOUND -- set to 1 if found - -SET(LUAT_FOUND 1) -SET(LUAT_INCLUDE_DIR "@LUAT_INCLUDE_DIR@") -SET(LUAT_LIBRARIES "@LUAT_LIBRARIES@") diff --git a/contrib/lua-torch/torch7/mkdocs.yml b/contrib/lua-torch/torch7/mkdocs.yml deleted file mode 100644 index 39a34d7c23..0000000000 --- a/contrib/lua-torch/torch7/mkdocs.yml +++ /dev/null @@ -1,21 +0,0 @@ -site_name: torch7 -theme : simplex -repo_url : https://github.com/torch/torch7 -use_directory_urls : false -markdown_extensions: [extra] -docs_dir : doc -pages: -- [index.md, Home] -- [tensor.md, Tensor Library, Tensor] -- [maths.md, Tensor Library, Tensor Math] -- [storage.md, Tensor Library, Storage] -- [file.md, File I/O Library, File Interface] -- [diskfile.md, File I/O Library, Disk File] -- [memoryfile.md, File I/O Library, Memory File] -- [pipefile.md, File I/O Library, Pipe File] -- [serialization.md, File I/O Library, Serialization] -- [utility.md, Useful Utilities, Class] -- [timer.md, Useful Utilities, Timer] -- [tester.md, Useful Utilities, Tester] -- [cmdline.md, Useful Utilities, CmdLine] -- [random.md, Useful Utilities, Random] diff --git a/contrib/lua-torch/torch7/paths.lua.in b/contrib/lua-torch/torch7/paths.lua.in deleted file mode 100644 index 287770b125..0000000000 --- a/contrib/lua-torch/torch7/paths.lua.in +++ /dev/null @@ -1,11 +0,0 @@ -local paths = {} - -paths.install_prefix = [[@Torch_INSTALL_PREFIX@]] -paths.install_bin = [[@Torch_INSTALL_BIN@]] -paths.install_man = [[@Torch_INSTALL_MAN@]] -paths.install_lib = [[@Torch_INSTALL_LIB@]] -paths.install_share = [[@Torch_INSTALL_SHARE@]] -paths.install_include = [[@Torch_INSTALL_INCLUDE@]] -paths.install_cmake = [[@Torch_INSTALL_CMAKE@]] - -return paths diff --git a/contrib/lua-torch/torch7/random.c b/contrib/lua-torch/torch7/random.c deleted file mode 100644 index bbe6ba08e2..0000000000 --- a/contrib/lua-torch/torch7/random.c +++ /dev/null @@ -1,237 +0,0 @@ -#include "luaT.h" -#include "TH.h" - -extern void torch_Generator_init(lua_State *L); -extern void torch_Generator_new(lua_State *L); - -#ifndef _CWRAP_STR_ARG_TYPES_4821726c1947cdf3eebacade98173939 -#define _CWRAP_STR_ARG_TYPES_4821726c1947cdf3eebacade98173939 -#include "string.h" -static void str_arg_types(lua_State *L, char *buf, int n) { - int i; - int nargs = lua_gettop(L); - if (nargs == 0) { - 
snprintf(buf, n, "no arguments provided"); - return; - } - for (i = 1; i <= nargs; i++) { - int l; - const char *torch_type = luaT_typename(L, i); - if(torch_type && !strncmp(torch_type, "torch.", 6)) torch_type += 6; - if (torch_type) l = snprintf(buf, n, "%s ", torch_type); - else if(lua_isnil(L, i)) l = snprintf(buf, n, "%s ", "nil"); - else if(lua_isboolean(L, i)) l = snprintf(buf, n, "%s ", "boolean"); - else if(lua_isnumber(L, i)) l = snprintf(buf, n, "%s ", "number"); - else if(lua_isstring(L, i)) l = snprintf(buf, n, "%s ", "string"); - else if(lua_istable(L, i)) l = snprintf(buf, n, "%s ", "table"); - else if(lua_isuserdata(L, i)) l = snprintf(buf, n, "%s ", "userdata"); - else l = snprintf(buf, n, "%s ", "???"); - if (l >= n) return; - buf += l; - n -= l; - } -} -#endif -static int wrapper_seed(lua_State *L) -{ -int narg = lua_gettop(L); -THGenerator *arg1 = NULL; -long arg2 = 0; -if(narg == 0 -) -{ -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator]", type_buf); -} -arg2 = THRandom_seed(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int wrapper_initialSeed(lua_State *L) -{ -int narg = lua_gettop(L); -THGenerator *arg1 = NULL; -long arg2 = 0; -if(narg == 0 -) -{ -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator]", type_buf); -} -arg2 = THRandom_initialSeed(arg1); -lua_pushnumber(L, (lua_Number)arg2); -return 1; -} - -static int wrapper_manualSeed(lua_State *L) -{ -int narg = lua_gettop(L); -THGenerator *arg1 = NULL; -long arg2 = 0; -if(narg == 1 -&& lua_isnumber(L, 1) -) -{ -arg2 = (long)lua_tonumber(L, 1); -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& lua_isnumber(L, 2) -) -{ -arg2 = (long)lua_tonumber(L, 2); -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] long", type_buf); -} -THRandom_manualSeed(arg1,arg2); -return 0; -} - -static int wrapper_getRNGState(lua_State *L) -{ -int narg = lua_gettop(L); -THGenerator *arg1 = NULL; -THByteTensor *arg2 = NULL; -int arg2_idx = 0; -if(narg == 0 -) -{ -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -arg2 = THByteTensor_new(); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -arg2 = THByteTensor_new(); -} -else if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg2_idx = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg2_idx = 2; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [*ByteTensor*]", type_buf); -} -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, 
arg2, "torch.ByteTensor"); -THByteTensor_getRNGState(arg1,arg2); -return 1; -} - -static int wrapper_setRNGState(lua_State *L) -{ -int narg = lua_gettop(L); -THGenerator *arg1 = NULL; -THByteTensor *arg2 = NULL; -int arg2_idx = 0; -if(narg == 0 -) -{ -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -arg2 = THByteTensor_new(); -} -else if(narg == 1 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -) -{ -arg2 = THByteTensor_new(); -} -else if(narg == 1 -&& (arg2 = luaT_toudata(L, 1, "torch.ByteTensor")) -) -{ -arg2_idx = 1; -lua_getglobal(L,"torch"); -arg1 = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator); -lua_pop(L, 2); -} -else if(narg == 2 -&& (arg1 = luaT_toudata(L, 1, torch_Generator)) -&& (arg2 = luaT_toudata(L, 2, "torch.ByteTensor")) -) -{ -arg2_idx = 2; -} -else -{ -char type_buf[512]; -str_arg_types(L, type_buf, 512); -luaL_error(L, "invalid arguments: %s\nexpected arguments: [Generator] [*ByteTensor*]", type_buf); -} -if(arg2_idx) -lua_pushvalue(L, arg2_idx); -else -luaT_pushudata(L, arg2, "torch.ByteTensor"); -THByteTensor_setRNGState(arg1,arg2); -return 1; -} - -static const struct luaL_Reg random__ [] = { -{"seed", wrapper_seed}, -{"initialSeed", wrapper_initialSeed}, -{"manualSeed", wrapper_manualSeed}, -{"getRNGState", wrapper_getRNGState}, -{"setRNGState", wrapper_setRNGState}, -{NULL, NULL} -}; - -void torch_random_init(lua_State *L) -{ - torch_Generator_init(L); - torch_Generator_new(L); - lua_setfield(L, -2, "_gen"); - luaT_setfuncs(L, random__, 0); -} diff --git a/contrib/lua-torch/torch7/random.lua b/contrib/lua-torch/torch7/random.lua deleted file mode 100644 index 59bbd7b1a0..0000000000 --- a/contrib/lua-torch/torch7/random.lua +++ /dev/null @@ -1,53 +0,0 @@ -local wrap = require 'cwrap' - -require 'torchcwrap' - -local interface = wrap.CInterface.new() - -interface:print( - [[ -#include "luaT.h" -#include "TH.h" - -extern void torch_Generator_init(lua_State *L); -extern void torch_Generator_new(lua_State *L); - ]]) - -for _,name in ipairs({"seed", "initialSeed"}) do - interface:wrap(name, - string.format("THRandom_%s",name), - {{name='Generator', default=true}, - {name="long", creturned=true}}) -end - -interface:wrap('manualSeed', - 'THRandom_manualSeed', - {{name='Generator', default=true}, - {name="long"}}) - -interface:wrap('getRNGState', - 'THByteTensor_getRNGState', - {{name='Generator', default=true}, - {name='ByteTensor',default=true,returned=true,method={default='nil'}} - }) - -interface:wrap('setRNGState', - 'THByteTensor_setRNGState', - {{name='Generator', default=true}, - {name='ByteTensor',default=true,returned=true,method={default='nil'}} - }) - -interface:register("random__") - -interface:print( - [[ -void torch_random_init(lua_State *L) -{ - torch_Generator_init(L); - torch_Generator_new(L); - lua_setfield(L, -2, "_gen"); - luaT_setfuncs(L, random__, 0); -} -]]) - -interface:tofile(arg[1]) diff --git a/contrib/lua-torch/torch7/test/longSize.lua b/contrib/lua-torch/torch7/test/longSize.lua deleted file mode 100644 index f5eba865c9..0000000000 --- a/contrib/lua-torch/torch7/test/longSize.lua +++ /dev/null @@ -1,61 +0,0 @@ -require 'torch' - -local tester = torch.Tester() -local tests = torch.TestSuite() - -local tensor = torch.rand(2,3) - -function tests.diskFileLongSize8() - f = torch.DiskFile('tensor8.bin','w') - f:binary() - f:longSize(8) - f:writeObject(tensor) - f:close() - f = torch.DiskFile('tensor8.bin','r') - f:binary() - f:longSize(8) - tensor2 = f:readObject() - f:close() - 
tester:assert(tensor:norm()==tensor2:norm()) - os.remove('tensor8.bin') -end - -function tests.diskFileLongSize4() - f = torch.DiskFile('tensor4.bin','w') - f:binary() - f:longSize(4) - f:writeObject(tensor) - f:close() - f = torch.DiskFile('tensor4.bin','r') - f:binary() - f:longSize(4) - tensor2 = f:readObject() - f:close() - tester:assert(tensor:norm()==tensor2:norm()) - os.remove('tensor4.bin') -end - -function tests.memoryFileLongSize8() - f = torch.MemoryFile() - f:binary() - f:longSize(8) - f:writeObject(tensor) - f:seek(1) - tensor2 = f:readObject() - f:close() - tester:assert(tensor:norm()==tensor2:norm()) -end - -function tests.memoryFileLongSize4() - f = torch.MemoryFile() - f:binary() - f:longSize(4) - f:writeObject(tensor) - f:seek(1) - tensor2 = f:readObject() - f:close() - tester:assert(tensor:norm()==tensor2:norm()) -end - -tester:add(tests) -tester:run() diff --git a/contrib/lua-torch/torch7/test/test.lua b/contrib/lua-torch/torch7/test/test.lua deleted file mode 100644 index 6c5dc71b03..0000000000 --- a/contrib/lua-torch/torch7/test/test.lua +++ /dev/null @@ -1,3969 +0,0 @@ ---require 'torch' - -local mytester -local torchtest = torch.TestSuite() -local msize = 100 -local precision - --- Lua 5.2 compatibility -local loadstring = loadstring or load -local unpack = unpack or table.unpack - -local function maxdiff(x,y) - local d = x-y - if x:type() == 'torch.DoubleTensor' or x:type() == 'torch.FloatTensor' then - return d:abs():max() - else - local dd = torch.Tensor():resize(d:size()):copy(d) - return dd:abs():max() - end -end - --- workarounds for non-existent functions -function torch.HalfTensor:__sub(other) - return (self:real() - other:real()):half() -end - -function torch.HalfTensor:mean(dim) - return self:real():mean(dim):half() -end - -function torch.HalfTensor:abs() - return self:real():abs():half() -end - -function torch.HalfTensor:max() - return self:real():max() -end - -function torch.HalfTensor:add(a, b) - return (self:real():add(a, b:real())):half() -end - -function torch.HalfTensor:reshape(a, b) - return (self:real():reshape(a, b)):half() -end - -function torch.HalfTensor:fill(a) - return self:real():fill(a):half() -end - -function torchtest.dot() - local types = { - ['torch.DoubleTensor'] = 1e-8, -- for ddot - ['torch.FloatTensor'] = 1e-4, -- for sdot - } - for tname, prec in pairs(types) do - local v1 = torch.randn(100):type(tname) - local v2 = torch.randn(100):type(tname) - - local res1 = torch.dot(v1,v2) - - local res2 = 0 - for i = 1,v1:size(1) do - res2 = res2 + v1[i] * v2[i] - end - - local err = math.abs(res1-res2) - - mytester:assertlt(err, prec, 'error in torch.dot (' .. tname .. 
')') - end -end - -local genericSingleOpTest = [[ - -- [res] torch.functionname([res,] x) - -- contiguous - local m1 = torch.randn(100,100) - local res1 = torch.functionname(m1[{ 4,{} }]) - local res2 = res1:clone():zero() - for i = 1,res1:size(1) do - res2[i] = math.functionname(m1[4][i]) - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - err[i] = math.abs(res1[i] - res2[i]) - end - -- find maximum element of error - local maxerrc = 0 - for i = 1, err:size(1) do - if err[i] > maxerrc then - maxerrc = err[i] - end - end - - -- non-contiguous - local m1 = torch.randn(100,100) - local res1 = torch.functionname(m1[{ {}, 4 }]) - local res2 = res1:clone():zero() - for i = 1,res1:size(1) do - res2[i] = math.functionname(m1[i][4]) - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - err[i] = math.abs(res1[i] - res2[i]) - end - -- find maximum element of error - local maxerrnc = 0 - for i = 1, err:size(1) do - if err[i] > maxerrnc then - maxerrnc = err[i] - end - end - return maxerrc, maxerrnc ---]] - -function torchtest.sin() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'sin')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.sinh() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'sinh')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.asin() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'asin')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.cos() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'cos')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.cosh() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'cosh')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.acos() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'acos')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.tan() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'tan')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.tanh() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'tanh')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') 
- mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.atan() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'atan')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.log() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'log')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.sqrt() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'sqrt')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.rsqrt() - local function TH_rsqrt(x) - return 1 / math.sqrt(x) - end - - local f - local t = genericSingleOpTest:gsub('functionname', 'rsqrt'):gsub('math.rsqrt', 'TH_rsqrt') - local env = { TH_rsqrt=TH_rsqrt, torch=torch, math=math } - if not setfenv then -- Lua 5.2 - f = load(t, 'test', 't', env) - else - f = loadstring(t) - setfenv(f, env) - end - - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.sigmoid() - -- can't use genericSingleOpTest, since `math.sigmoid` doesn't exist, have to use - -- `torch.sigmoid` instead - local inputValues = {-1000,-1,0,0.5,1,2,1000} - local expectedOutput = {0.0000, 0.2689, 0.5, 0.6225, 0.7311, 0.8808, 1.000} - - local precision_4dps = 0.0002 - - -- float - local inputFT = torch.FloatTensor(inputValues) - local expectedFT = torch.FloatTensor(expectedOutput) - mytester:assertlt((torch.sigmoid(inputFT) - expectedFT):abs():max(), precision_4dps, 'error in torch.sigmoid - single') - mytester:assertlt((inputFT - torch.FloatTensor(inputValues)):abs():max(), precision_4dps, 'error in torch.sigmoid - single') - local sigmoidFT = torch.FloatTensor(inputValues):sigmoid() - mytester:assertlt((sigmoidFT - expectedFT):abs():max(), precision_4dps, 'error in torch.sigmoid - single') - - -- double - local inputDT = torch.DoubleTensor(inputValues) - local expectedDT = torch.DoubleTensor(expectedOutput) - mytester:assertlt((torch.sigmoid(inputDT) - expectedDT):abs():max(), precision_4dps, 'error in torch.sigmoid - double') - mytester:assertlt((inputDT - torch.DoubleTensor(inputValues)):abs():max(), precision_4dps, 'error in torch.sigmoid - double') - local sigmoidDT = torch.DoubleTensor(inputValues):sigmoid() - mytester:assertlt((sigmoidDT - expectedDT):abs():max(), precision_4dps, 'error in torch.sigmoid - double') -end - -function torchtest.exp() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'exp')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.floor() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'floor')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname 
- contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.ceil() - local f = loadstring(string.gsub(genericSingleOpTest, 'functionname', 'ceil')) - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.frac() - local function TH_frac(x) - return math.fmod(x, 1) - end - - local f - local t = genericSingleOpTest:gsub('functionname', 'frac'):gsub('math.frac', 'TH_frac') - local env = { TH_frac=TH_frac, torch=torch, math=math } - if not setfenv then -- Lua 5.2 - f = load(t, 'test', 't', env) - else - f = loadstring(t) - setfenv(f, env) - end - - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.trunc() - local function TH_trunc(x) - return x - math.fmod(x, 1) - end - - local f - local t = genericSingleOpTest:gsub('functionname', 'trunc'):gsub('math.trunc', 'TH_trunc') - local env = { TH_trunc=TH_trunc, torch=torch, math=math } - if not setfenv then -- Lua 5.2 - f = load(t, 'test', 't', env) - else - f = loadstring(t) - setfenv(f, env) - end - - local maxerrc, maxerrnc = f() - mytester:assertlt(maxerrc, precision, 'error in torch.functionname - contiguous') - mytester:assertlt(maxerrnc, precision, 'error in torch.functionname - non-contiguous') -end - -function torchtest.round() - -- [res] torch.round([res,] x) - -- contiguous - local m1 = torch.randn(100,100) - local res1 = torch.round(m1[{ 4,{} }]) - local res2 = res1:clone():zero() - for i = 1,res1:size(1) do - res2[i] = math.floor(m1[4][i]+0.5) - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - err[i] = math.abs(res1[i] - res2[i]) - end - -- find maximum element of error - local maxerrc = 0 - for i = 1, err:size(1) do - if err[i] > maxerrc then - maxerrc = err[i] - end - end - mytester:assertlt(maxerrc, precision, 'error in torch.round - contiguous') - - -- non-contiguous - local m1 = torch.randn(100,100) - local res1 = torch.round(m1[{ {}, 4 }]) - local res2 = res1:clone():zero() - for i = 1,res1:size(1) do - res2[i] = math.floor(m1[i][4]+0.5) - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - err[i] = math.abs(res1[i] - res2[i]) - end - -- find maximum element of error - local maxerrnc = 0 - for i = 1, err:size(1) do - if err[i] > maxerrnc then - maxerrnc = err[i] - end - end - mytester:assertlt(maxerrnc, precision, 'error in torch.round - non-contiguous') -end - -function torchtest.max() -- torch.max([resval, resind,] x [,dim]) - - -- TH_TENSOR_BASE - local m1 = torch.Tensor(8,2):fill(3):select(2, 1) - local resval, resind = torch.max(m1, 1) - mytester:assert(resind[1] == 1) - - -- torch.max( x ) - -- contiguous - local m1 = torch.randn(100,100) - local res1 = torch.max(m1) - local res2 = m1[1][1] - for i = 1,m1:size(1) do - for j = 1,m1:size(2) do - if m1[i][j] > res2 then - res2 = m1[i][j] - end - end - end - local err = res1 - res2 - mytester:assertlt(err, precision, 'error in torch.max - contiguous') - - -- non-contiguous - local m1 = torch.randn(10,10,10) - local m2 = m1[{{}, 4, {}}] - local res1 = torch.max(m2) - local res2 = m2[1][1] - for i = 1,m2:size(1) do - for j = 1,m2:size(2) do - if m2[i][j] > res2 then - res2 = 
m2[i][j]
-         end
-      end
-   end
-   local err = res1 - res2
-   mytester:assertlt(err, precision, 'error in torch.max - non-contiguous')
-
-   -- torch.max([resval, resind,] x [,dim])
-   local function lua_max(t, dim)
-      assert(t:nDimension() == 2)
-      local max_val = t:narrow(dim, 1, 1):clone()
-      local max_ind = t:narrow(dim, 1, 1):clone():long():fill(1)
-      local other = 3 - dim
-      for i = 1, t:size(other) do
-         for j = 1, t:size(dim) do
-            local val = t:select(other, i):select(dim, j)
-            local cur = max_val:select(other, i):select(dim, 1)
-            if val > cur then
-               max_val:select(other, i):fill(val)
-               max_ind:select(other, i):fill(j)
-            end
-         end
-      end
-      return max_val, max_ind
-   end
-
-   local m1 = torch.randn(100,100)
-   for dim = 1,2 do
-      local res1val, res1ind = torch.max(m1, dim)
-      local res2val, res2ind = lua_max(m1, dim)
-      mytester:asserteq((res1val-res2val):abs():max(), 0, 'error in torch.max')
-      mytester:asserteq((res1ind-res2ind):abs():max(), 0, 'error in torch.max')
-   end
-
-   -- NaNs
-   for _, index in ipairs{1, 5, 100} do
-      local m1 = torch.randn(100)
-      m1[index] = 0/0
-      local res1val, res1ind = torch.max(m1, 1)
-      mytester:assert(res1val[1] ~= res1val[1], 'error in torch.max (value) - NaNs')
-      mytester:assert(res1ind[1] == index, 'error in torch.max (index) - NaNs')
-      local res1val = torch.max(m1)
-      mytester:assert(res1val ~= res1val, 'error in torch.max - NaNs')
-   end
-
-   -- dim == nDim -1
-   local a = torch.Tensor({{1,2},{3,4}}):select(2, 1)
-   local aval, aind = torch.max(a, 1)
-   mytester:assert(aval[1] == 3)
-   mytester:assert(aind[1] == 2)
-
-   local b = torch.Tensor({{{1,2},{3,4}},{{5,6},{7,8}}}):select(3, 1)
-   local bval, bind = torch.max(b, 2)
-   mytester:assert(bval[1][1] == 3)
-   mytester:assert(bind[1][1] == 2)
-   mytester:assert(bval[2][1] == 7)
-   mytester:assert(bind[2][1] == 2)
-end
-
-function torchtest.min() -- torch.min([resval, resind,] x [,dim])
-   -- torch.min( x )
-   -- contiguous
-   local m1 = torch.randn(100,100)
-   local res1 = torch.min(m1)
-   local res2 = m1[1][1]
-   for i = 1,m1:size(1) do
-      for j = 1,m1:size(2) do
-         if m1[i][j] < res2 then
-            res2 = m1[i][j]
-         end
-      end
-   end
-   local err = res1 - res2
-   mytester:assertlt(err, precision, 'error in torch.min - contiguous')
-   -- non-contiguous
-   local m1 = torch.randn(10,10,10)
-   local m2 = m1[{{}, 4, {}}]
-   local res1 = torch.min(m2)
-   local res2 = m2[1][1]
-   for i = 1,m2:size(1) do
-      for j = 1,m2:size(2) do
-         if m2[i][j] < res2 then
-            res2 = m2[i][j]
-         end
-      end
-   end
-   local err = res1 - res2
-   mytester:assertlt(err, precision, 'error in torch.min - non-contiguous')
-
-   -- torch.min([resval, resind,] x [,dim])
-   local function lua_min(t, dim)
-      assert(t:nDimension() == 2)
-      local min_val = t:narrow(dim, 1, 1):clone()
-      local min_ind = t:narrow(dim, 1, 1):clone():long():fill(1)
-      local other = 3 - dim
-      for i = 1, t:size(other) do
-         for j = 1, t:size(dim) do
-            local val = t:select(other, i):select(dim, j)
-            local cur = min_val:select(other, i):select(dim, 1)
-            if val < cur then
-               min_val:select(other, i):fill(val)
-               min_ind:select(other, i):fill(j)
-            end
-         end
-      end
-      return min_val, min_ind
-   end
-
-   local m1 = torch.randn(100,100)
-   for dim = 1,2 do
-      local res1val, res1ind = torch.min(m1, dim)
-      local res2val, res2ind = lua_min(m1, dim)
-      mytester:asserteq((res1val-res2val):abs():max(), 0, 'error in torch.min')
-      mytester:asserteq((res1ind-res2ind):abs():max(), 0, 'error in torch.min')
-   end
-
-   -- NaNs
-   for _, index in ipairs{1, 5, 100} do
-      local m1 = torch.randn(100)
-      m1[index] = 0/0
-      local res1val, res1ind = torch.min(m1, 1)
-      mytester:assert(res1val[1] ~= res1val[1], 'error in torch.min (value) - NaNs')
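-      -- (0/0 yields NaN, and IEEE 754 NaN is the only value for which
-      -- v ~= v holds; that self-inequality is what these assertions rely on,
-      -- e.g. a hypothetical check: local isnan = function(v) return v ~= v end)
- 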
mytester:assert(res1ind[1] == index, 'error in torch.min (index) - NaNs') - local res1val = torch.min(m1) - mytester:assert(res1val ~= res1val, 'error in torch.min - NaNs') - end - - -- TH_TENSOR_BASE - local m1 = torch.Tensor(4):fill(3) - local resval, resind = torch.min(m1, 1) - mytester:assert(resind[1] == 1) -end - -function torchtest.cmax() - -- Two tensors. - local a = torch.rand(msize, msize) - local b = torch.rand(msize, msize) - local c = torch.cmax(a, b) - local expected_c = torch.zeros(msize, msize) - expected_c:map2(a, b, function(_, a, b) return math.max(a, b) end) - mytester:assertTensorEq(expected_c, c, 0, - 'error in torch.cmax(tensor, tensor)') - - -- Tensor and scalar. - local v = torch.uniform() - c = torch.cmax(a, v) - expected_c:map(a, function(_, a) return math.max(a, v) end) - mytester:assertTensorEq(expected_c, c, 0, - 'error in torch.cmax(tensor, scalar).') -end - -function torchtest.cmin() - -- Two tensors. - local a = torch.rand(msize, msize) - local b = torch.rand(msize, msize) - local c = torch.cmin(a, b) - local expected_c = torch.zeros(msize, msize) - expected_c:map2(a, b, function(_, a, b) return math.min(a, b) end) - mytester:assertTensorEq(expected_c, c, 0, - 'error in torch.cmin(tensor, tensor)') - - -- Tensor and scalar. - local v = torch.uniform() - c = torch.cmin(a, v) - expected_c:map(a, function(_, a) return math.min(a, v) end) - mytester:assertTensorEq(expected_c, c, 0, - 'error in torch.cmin(tensor, scalar).') -end - -function torchtest.lerp() - local function TH_lerp(a, b, weight) - return a + weight * (b-a); - end - - local a = torch.rand(msize, msize) - local b = torch.rand(msize, msize) - local w = math.random() - local result = torch.lerp(a, b, w) - local expected = a:new() - expected:map2(a, b, function(_, a, b) return TH_lerp(a, b, w) end) - mytester:assertTensorEq(expected, result, precision, 'error in torch.lerp(tensor, tensor, weight)') - - local a = (math.random()*2-1) * 100000 - local b = (math.random()*2-1) * 100000 - local w = math.random() - local result = torch.lerp(a, b, w) - local expected = TH_lerp(a, b, w) - mytester:assertalmosteq(expected, result, precision, 'error in torch.lerp(scalar, scalar, weight)') -end - -for i, v in ipairs{{10}, {5, 5}} do - torchtest['allAndAny' .. 
i] = - function () - local x = torch.ones(unpack(v)):byte() - mytester:assert(x:all(), 'error in all()') - mytester:assert(x:any(), 'error in any()') - - x[3] = 0 - mytester:assert(not x:all(), 'error in all()') - mytester:assert(x:any(), 'error in any()') - - x:zero() - mytester:assert(not x:all(), 'error in all()') - mytester:assert(not x:any(), 'error in any()') - - x:fill(2) - mytester:assert(x:all(), 'error in all()') - mytester:assert(x:any(), 'error in any()') - end -end - -function torchtest.mv() - local m1 = torch.randn(100,100) - local v1 = torch.randn(100) - - local res1 = torch.mv(m1,v1) - - local res2 = res1:clone():zero() - for i = 1,m1:size(1) do - for j = 1,m1:size(2) do - res2[i] = res2[i] + m1[i][j] * v1[j] - end - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.mv') -end - -function torchtest.fill() - local types = { - 'torch.ByteTensor', - 'torch.CharTensor', - 'torch.ShortTensor', - 'torch.IntTensor', - 'torch.FloatTensor', - 'torch.DoubleTensor', - 'torch.LongTensor', - } - - for k,t in ipairs(types) do - -- [res] torch.fill([res,] tensor, value) - local m1 = torch.ones(100,100):type(t) - local res1 = m1:clone() - res1[{ 3,{} }]:fill(2) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - res2[{ 3,i }] = 2 - end - - local err = (res1-res2):double():abs():max() - - mytester:assertlt(err, precision, 'error in torch.fill - contiguous') - - local m1 = torch.ones(100,100):type(t) - local res1 = m1:clone() - res1[{ {},3 }]:fill(2) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - res2[{ i,3 }] = 2 - end - - local err = (res1-res2):double():abs():max() - - mytester:assertlt(err, precision, 'error in torch.fill - non contiguous') - end -end - -function torchtest.add() - local types = { - 'torch.ByteTensor', - 'torch.CharTensor', - 'torch.ShortTensor', - 'torch.IntTensor', - 'torch.FloatTensor', - 'torch.DoubleTensor', - 'torch.LongTensor', - } - - for k,t in ipairs(types) do - -- [res] torch.add([res,] tensor1, tensor2) - local m1 = torch.randn(100,100):type(t) - local v1 = torch.randn(100):type(t) - - local res1 = torch.add(m1[{ 4,{} }],v1) - - local res2 = res1:clone():zero() - for i = 1,m1:size(2) do - res2[i] = m1[4][i] + v1[i] - end - - local err = (res1-res2):double():abs():max() - - mytester:assertlt(err, precision, 'error in torch.add - contiguous' .. ' ' .. t) - - local m1 = torch.randn(100,100):type(t) - local v1 = torch.randn(100):type(t) - - local res1 = torch.add(m1[{ {},4 }],v1) - - local res2 = res1:clone():zero() - for i = 1,m1:size(1) do - res2[i] = m1[i][4] + v1[i] - end - - local err = (res1-res2):double():abs():max() - - mytester:assertlt(err, precision, 'error in torch.add - non contiguous' .. ' ' .. t) - - -- [res] torch.add([res,] tensor, value) - local m1 = torch.randn(10,10):type(t) - local res1 = m1:clone() - res1[{ 3,{} }]:add(2) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - res2[{ 3,i }] = res2[{ 3,i }] + 2 - end - - local err = (res1-res2):double():abs():max() - - mytester:assertlt(err, precision, 'error in torch.add - scalar, contiguous' .. ' ' .. t) - - local m1 = torch.randn(10,10) - local res1 = m1:clone() - res1[{ {},3 }]:add(2) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - res2[{ i,3 }] = res2[{ i,3 }] + 2 - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.add - scalar, non contiguous' .. ' ' .. 
t) - - -- [res] torch.add([res,] tensor1, value, tensor2) - end -end - -function torchtest.csub() - local rngState = torch.getRNGState() - torch.manualSeed(123) - - local a = torch.randn(100,90) - local b = a:clone():normal() - - local res_add = torch.add(a, -1, b) - local res_csub = a:clone() - res_csub:csub(b) - - mytester:assertlt((res_add - res_csub):abs():max(), 0.00001) - - local _ = torch.setRNGState(rngState) -end - -function torchtest.csub_scalar() - local rngState = torch.getRNGState() - torch.manualSeed(123) - - local a = torch.randn(100,100) - - local scalar = 123.5 - local res_add = torch.add(a, -scalar) - local res_csub = a:clone() - res_csub:csub(scalar) - - mytester:assertlt((res_add - res_csub):abs():max(), 0.00001) - - local _ = torch.setRNGState(rngState) -end - -function torchtest.neg() - local rngState = torch.getRNGState() - torch.manualSeed(123) - - local a = torch.randn(100,90) - local zeros = torch.Tensor():resizeAs(a):zero() - - local res_add = torch.add(zeros, -1, a) - local res_neg = a:clone() - res_neg:neg() - - mytester:assertlt((res_add - res_neg):abs():max(), 0.00001) - - local _ = torch.setRNGState(rngState) -end - -function torchtest.cinv() - local rngState = torch.getRNGState() - torch.manualSeed(123) - - local a = torch.randn(100,89) - local zeros = torch.Tensor():resizeAs(a):zero() - - local res_pow = torch.pow(a, -1) - local res_inv = a:clone() - res_inv:cinv() - - mytester:assertlt((res_pow - res_inv):abs():max(), 0.00001) - - local _ = torch.setRNGState(rngState) -end - -function torchtest.mul() - local types = { - 'torch.ByteTensor', - 'torch.CharTensor', - 'torch.ShortTensor', - 'torch.IntTensor', - 'torch.FloatTensor', - 'torch.DoubleTensor', - 'torch.LongTensor', - } - - for k,t in ipairs(types) do - local m1 = torch.randn(10,10):type(t) - local res1 = m1:clone() - - res1[{ {},3 }]:mul(2) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - res2[{ i,3 }] = res2[{ i,3 }] * 2 - end - - local err = (res1-res2):double():abs():max() - - mytester:assertlt(err, precision, 'error in torch.mul - scalar, non contiguous' .. ' ' .. t) - end -end - -function torchtest.div() - local types = { - 'torch.ByteTensor', - 'torch.CharTensor', - 'torch.ShortTensor', - 'torch.IntTensor', - 'torch.FloatTensor', - 'torch.DoubleTensor', - 'torch.LongTensor', - } - - for k,t in ipairs(types) do - - local m1 = torch.Tensor(10,10):uniform(0,10):type(t) - local res1 = m1:clone() - - res1[{ {},3 }]:div(2) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - local ok = pcall(function() res2[{ i,3 }] = res2[{ i,3 }] / 2 end) - if not ok then - res2[{ i,3 }] = torch.floor(res2[{ i,3 }] / 2) - end - end - - local err = (res1-res2):double():abs():max() - - mytester:assertlt(err, precision, 'error in torch.div - scalar, non contiguous' .. ' ' .. 
t) - end -end - -function torchtest.lshift() - local m1 = torch.LongTensor(10,10):random(0,100) - local res1 = m1:clone() - - local q = 2 - local f = math.pow(2, q) - res1[{ {},3 }]:lshift(q) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - res2[{ i,3 }] = res2[{ i,3 }] * f - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.lshift - scalar, non contiguous') - - local m1 = torch.LongTensor(10,10):random(0,100) - local res1 = m1:clone() - - local q = 2 - res1:lshift(q) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - for j = 1,m1:size(1) do - res2[{ i,j }] = res2[{ i,j }] * f - end - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.lshift - scalar, contiguous') -end - -function torchtest.rshift() - local m1 = torch.LongTensor(10,10):random(0,100) - local res1 = m1:clone() - - local q = 2 - local f = math.pow(2, q) - res1[{ {},3 }]:rshift(q) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - res2[{ i,3 }] = math.floor(res2[{ i,3 }] / f) - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.rshift - scalar, non contiguous') - - local m1 = torch.LongTensor(10,10):random(0,100) - local res1 = m1:clone() - - local q = 2 - res1:rshift(q) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - for j = 1,m1:size(1) do - res2[{ i,j }] = math.floor(res2[{ i,j }] / f) - end - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.rshift - scalar, contiguous') -end - -function torchtest.fmod() - local m1 = torch.Tensor(10,10):uniform(-10, 10) - local res1 = m1:clone() - - local q = 2.1 - res1[{ {},3 }]:fmod(q) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - res2[{ i,3 }] = math.fmod(res2[{ i,3 }], q) - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.fmod - scalar, non contiguous') -end - -function torchtest.remainder() - local m1 = torch.Tensor(10, 10):uniform(-10, 10) - local res1 = m1:clone() - - local q = 2.1 - res1[{ {},3 }]:remainder(q) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - res2[{ i,3 }] = res2[{ i,3 }] % q - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.remainder - scalar, non contiguous') -end - -function torchtest.bitand() - local m1 = torch.LongTensor(10,10):random(0,100) - local res1 = m1:clone() - - local val = 32 -- This should be a power of 2 - res1[{ {},3 }]:bitand(val - 1) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - res2[{ i,3 }] = res2[{ i,3 }] % val - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.bitand - scalar, non contiguous') - - local m1 = torch.LongTensor(10,10):random(0,100) - local res1 = m1:clone() - - res1:bitand(val - 1) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - for j = 1,m1:size(1) do - res2[{ i,j }] = res2[{ i,j }] % val - end - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.bitand - scalar, contiguous') -end - -function torchtest.bitor() - local m1 = torch.LongTensor(10,10):random(0,10000) - local res1 = m1:clone() - - local val = 32 -- This should be a power of 2 - res1[{ {},3 }]:bitor(val-1) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - res2[{ i,3 }] = math.floor(res2[{ i,3 }] / val) * val + (val - 1) - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.bitor - 
scalar, non contiguous') - - local m1 = torch.LongTensor(10,10):random(0,10000) - local res1 = m1:clone() - - res1:bitor(val - 1) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - for j = 1,m1:size(1) do - res2[{ i,j }] = math.floor(res2[{ i,j }] / val) * val + (val - 1) - end - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.bitor - scalar, contiguous') -end - -function torchtest.cbitxor() - local t1 = torch.LongTensor(10,10):random(0,10000) - local t2 = torch.LongTensor(10,10):random(10001,20000) - - -- Perform xor swap and check results - local t3 = torch.cbitxor(t1, t2) - local r1 = torch.cbitxor(t3, t2) - local r2 = torch.cbitxor(t3, t1) - - local err1 = (r1 - t1):abs():max() - local err2 = (r2 - t2):abs():max() - mytester:assertlt(err1 + err2, precision, 'error in torch.cbitxor contiguous') -end - -function torchtest.mm() - -- helper function - local function matrixmultiply(mat1,mat2) - local n = mat1:size(1) - local m = mat1:size(2) - local p = mat2:size(2) - local res = torch.zeros(n,p) - for i = 1, n do - for j = 1, p do - local sum = 0 - for k = 1, m do - sum = sum + mat1[i][k]*mat2[k][j] - end - res[i][j] = sum - end - end - return res - end - - -- contiguous case - local n, m, p = 10, 10, 5 - local mat1 = torch.randn(n,m) - local mat2 = torch.randn(m,p) - local res = torch.mm(mat1,mat2) - - local res2 = matrixmultiply(mat1,mat2) - mytester:assertTensorEq(res,res2,precision,'error in torch.mm') - - -- non contiguous case 1 - local n, m, p = 10, 10, 5 - local mat1 = torch.randn(n,m) - local mat2 = torch.randn(p,m):t() - local res = torch.mm(mat1,mat2) - - local res2 = matrixmultiply(mat1,mat2) - mytester:assertTensorEq(res,res2,precision,'error in torch.mm, non contiguous') - - -- non contiguous case 2 - local n, m, p = 10, 10, 5 - local mat1 = torch.randn(m,n):t() - local mat2 = torch.randn(m,p) - local res = torch.mm(mat1,mat2) - - local res2 = matrixmultiply(mat1,mat2) - mytester:assertTensorEq(res,res2,precision,'error in torch.mm, non contiguous') - - -- non contiguous case 3 - local n, m, p = 10, 10, 5 - local mat1 = torch.randn(m,n):t() - local mat2 = torch.randn(p,m):t() - local res = torch.mm(mat1,mat2) - - local res2 = matrixmultiply(mat1,mat2) - mytester:assertTensorEq(res,res2,precision,'error in torch.mm, non contiguous') - - -- test with zero stride - local n, m, p = 10, 10, 5 - local mat1 = torch.randn(n,m) - local mat2 = torch.randn(m,1):expand(m,p) - local res = torch.mm(mat1,mat2) - - local res2 = matrixmultiply(mat1,mat2) - mytester:assertTensorEq(res,res2,precision,'error in torch.mm, non contiguous, zero stride') - -end - -function torchtest.bmm() - local num_batches = 10 - local M, N, O = 23, 8, 12 - local b1 = torch.randn(num_batches, M, N) - local b2 = torch.randn(num_batches, N, O) - local res = torch.bmm(b1, b2) - - for i = 1, num_batches do - local r = torch.mm(b1[i], b2[i]) - mytester:assertTensorEq(r, res[i], precision, 'result matrix ' .. i .. 
' wrong') - end -end - -function torchtest.addbmm() - local num_batches = 10 - local M, N, O = 12, 8, 5 - local b1 = torch.randn(num_batches, M, N) - local b2 = torch.randn(num_batches, N, O) - local res = torch.bmm(b1, b2) - local res2 = torch.Tensor():resizeAs(res[1]):zero() - - res2:addbmm(b1,b2) - mytester:assertTensorEq(res2, res:sum(1)[1], precision, 'addbmm result wrong') - - res2:addbmm(1,b1,b2) - mytester:assertTensorEq(res2, res:sum(1)[1]*2, precision, 'addbmm result wrong') - - res2:addbmm(1,res2,.5,b1,b2) - mytester:assertTensorEq(res2, res:sum(1)[1]*2.5, precision, 'addbmm result wrong') - - local res3 = torch.addbmm(1,res2,0,b1,b2) - mytester:assertTensorEq(res3, res2, precision, 'addbmm result wrong') - - local res4 = torch.addbmm(1,res2,.5,b1,b2) - mytester:assertTensorEq(res4, res:sum(1)[1]*3, precision, 'addbmm result wrong') - - local res5 = torch.addbmm(0,res2,1,b1,b2) - mytester:assertTensorEq(res5, res:sum(1)[1], precision, 'addbmm result wrong') - - local res6 = torch.addbmm(.1,res2,.5,b1,b2) - mytester:assertTensorEq(res6, res2*.1 + res:sum(1)*.5, precision, 'addbmm result wrong') -end - -function torchtest.baddbmm() - local num_batches = 10 - local M, N, O = 12, 8, 5 - local b1 = torch.randn(num_batches, M, N) - local b2 = torch.randn(num_batches, N, O) - local res = torch.bmm(b1, b2) - local res2 = torch.Tensor():resizeAs(res):zero() - - res2:baddbmm(b1,b2) - mytester:assertTensorEq(res2, res, precision, 'baddbmm result wrong') - - res2:baddbmm(1,b1,b2) - mytester:assertTensorEq(res2, res*2, precision, 'baddbmm result wrong') - - res2:baddbmm(1,res2,.5,b1,b2) - mytester:assertTensorEq(res2, res*2.5, precision, 'baddbmm result wrong') - - local res3 = torch.baddbmm(1,res2,0,b1,b2) - mytester:assertTensorEq(res3, res2, precision, 'baddbmm result wrong') - - local res4 = torch.baddbmm(1,res2,.5,b1,b2) - mytester:assertTensorEq(res4, res*3, precision, 'baddbmm result wrong') - - local res5 = torch.baddbmm(0,res2,1,b1,b2) - mytester:assertTensorEq(res5, res, precision, 'baddbmm result wrong') - - local res6 = torch.baddbmm(.1,res2,.5,b1,b2) - mytester:assertTensorEq(res6, res2*.1 + res*.5, precision, 'baddbmm result wrong') -end - -function torchtest.clamp() - local m1 = torch.rand(100):mul(5):add(-2.5) -- uniform in [-2.5, 2.5] - -- just in case we're extremely lucky: - local min_val = -1 - local max_val = 1 - m1[1] = min_val - m1[2] = max_val - local res1 = m1:clone() - - res1:clamp(min_val, max_val) - - local res2 = m1:clone() - for i = 1,m1:size(1) do - if res2[i] > max_val then - res2[i] = max_val - elseif res2[i] < min_val then - res2[i] = min_val - end - end - - local err = (res1-res2):abs():max() - - mytester:assertlt(err, precision, 'error in torch.clamp - scalar, non contiguous') -end - -function torchtest.pow() -- [res] torch.pow([res,] x) - -- base - tensor, exponent - number - -- contiguous - local m1 = torch.randn(100,100) - local res1 = torch.pow(m1[{ 4,{} }], 3) - local res2 = res1:clone():zero() - for i = 1,res1:size(1) do - res2[i] = math.pow(m1[4][i], 3) - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - err[i] = math.abs(res1[i] - res2[i]) - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - if err[i] > maxerr then - maxerr = err[i] - end - end - mytester:assertlt(maxerr, precision, 'error in torch.pow - contiguous') - - -- non-contiguous - local m1 = torch.randn(100,100) - local res1 = torch.pow(m1[{ {}, 4 }], 3) - local res2 = res1:clone():zero() - for i = 
1,res1:size(1) do - res2[i] = math.pow(m1[i][4], 3) - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - err[i] = math.abs(res1[i] - res2[i]) - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - if err[i] > maxerr then - maxerr = err[i] - end - end - mytester:assertlt(maxerr, precision, 'error in torch.pow - non-contiguous') - - -- base - number, exponent - tensor - -- contiguous - local m1 = torch.randn(100,100) - local res1 = torch.pow(3, m1[{ 4,{} }]) - local res2 = res1:clone():zero() - for i = 1,res1:size(1) do - res2[i] = math.pow(3, m1[4][i]) - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - err[i] = math.abs(res1[i] - res2[i]) - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - if err[i] > maxerr then - maxerr = err[i] - end - end - mytester:assertlt(maxerr, precision, 'error in torch.pow - contiguous') - - -- non-contiguous - local m1 = torch.randn(100,100) - local res1 = torch.pow(3, m1[{ {}, 4 }]) - local res2 = res1:clone():zero() - for i = 1,res1:size(1) do - res2[i] = math.pow(3, m1[i][4]) - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - err[i] = math.abs(res1[i] - res2[i]) - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - if err[i] > maxerr then - maxerr = err[i] - end - end - mytester:assertlt(maxerr, precision, 'error in torch.pow - non-contiguous') -end - -function torchtest.cdiv() - local types = { - 'torch.ByteTensor', - 'torch.CharTensor', - 'torch.ShortTensor', - 'torch.IntTensor', - 'torch.FloatTensor', - 'torch.DoubleTensor', - 'torch.LongTensor', - } - - for k,t in ipairs(types) do - - -- [res] torch.cdiv([res,] tensor1, tensor2) - -- contiguous - local m1 = torch.Tensor(10, 10, 10):uniform(0,10):type(t) - local m2 = torch.Tensor(10, 10 * 10):uniform(0,10):type(t) - m2[m2:eq(0)] = 2 - local sm1 = m1[{4, {}, {}}] - local sm2 = m2[{4, {}}] - local res1 = torch.cdiv(sm1, sm2) - local res2 = res1:clone():zero() - for i = 1,sm1:size(1) do - for j = 1, sm1:size(2) do - local idx1d = (((i-1)*sm1:size(1)))+j - local ok = pcall(function() res2[i][j] = sm1[i][j] / sm2[idx1d] end) - if not ok then - res2[i][j] = torch.floor(sm1[i][j] / sm2[idx1d]) - end - end - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - for j = 1, res1:size(2) do - err[i][j] = math.abs(res1[i][j] - res2[i][j]) - end - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - for j = 1, err:size(2) do - if err[i][j] > maxerr then - maxerr = err[i][j] - end - end - end - mytester:assertlt(maxerr, precision, 'error in torch.cdiv - contiguous' .. ' ' .. 
t) - - -- non-contiguous - local m1 = torch.Tensor(10, 10, 10):uniform(0,10):type(t) - local m2 = torch.Tensor(10 * 10, 10 * 10):uniform(0,10):type(t) - m2[m2:eq(0)] = 2 - local sm1 = m1[{{}, 4, {}}] - local sm2 = m2[{{}, 4}] - local res1 = torch.cdiv(sm1, sm2) - local res2 = res1:clone():zero() - for i = 1,sm1:size(1) do - for j = 1, sm1:size(2) do - local idx1d = (((i-1)*sm1:size(1)))+j - local ok = pcall(function() res2[i][j] = sm1[i][j] / sm2[idx1d] end) - if not ok then - res2[i][j] = torch.floor(sm1[i][j] / sm2[idx1d]) - end - end - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - for j = 1, res1:size(2) do - err[i][j] = math.abs(res1[i][j] - res2[i][j]) - end - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - for j = 1, err:size(2) do - if err[i][j] > maxerr then - maxerr = err[i][j] - end - end - end - mytester:assertlt(maxerr, precision, 'error in torch.cdiv - non-contiguous' .. ' ' .. t) - end -end - -function torchtest.cfmod() - -- contiguous - local m1 = torch.Tensor(10, 10, 10):uniform(-10, 10) - local m2 = torch.Tensor(10, 10 * 10):uniform(-3, 3) - local sm1 = m1[{4, {}, {}}] - local sm2 = m2[{4, {}}] - local res1 = torch.cfmod(sm1, sm2) - local res2 = res1:clone():zero() - for i = 1,sm1:size(1) do - for j = 1, sm1:size(2) do - local idx1d = (((i-1)*sm1:size(1)))+j - res2[i][j] = math.fmod(sm1[i][j], sm2[idx1d]) - end - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - for j = 1, res1:size(2) do - err[i][j] = math.abs(res1[i][j] - res2[i][j]) - end - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - for j = 1, err:size(2) do - if err[i][j] > maxerr then - maxerr = err[i][j] - end - end - end - mytester:assertlt(maxerr, precision, 'error in torch.cfmod - contiguous') - - -- non-contiguous - local m1 = torch.Tensor(10, 10, 10):uniform(-10, 10) - local m2 = torch.Tensor(10 * 10, 10 * 10):uniform(-3, 3) - local sm1 = m1[{{}, 4, {}}] - local sm2 = m2[{{}, 4}] - local res1 = torch.cfmod(sm1, sm2) - local res2 = res1:clone():zero() - for i = 1,sm1:size(1) do - for j = 1, sm1:size(2) do - local idx1d = (((i-1)*sm1:size(1)))+j - res2[i][j] = math.fmod(sm1[i][j], sm2[idx1d]) - end - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - for j = 1, res1:size(2) do - err[i][j] = math.abs(res1[i][j] - res2[i][j]) - end - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - for j = 1, err:size(2) do - if err[i][j] > maxerr then - maxerr = err[i][j] - end - end - end - mytester:assertlt(maxerr, precision, 'error in torch.cfmod - non-contiguous') -end - -function torchtest.cremainder() - -- contiguous - local m1 = torch.Tensor(10, 10, 10):uniform(-10, 10) - local m2 = torch.Tensor(10, 10 * 10):uniform(-3, 3) - local sm1 = m1[{4, {}, {}}] - local sm2 = m2[{4, {}}] - local res1 = torch.cremainder(sm1, sm2) - local res2 = res1:clone():zero() - for i = 1,sm1:size(1) do - for j = 1, sm1:size(2) do - local idx1d = (((i-1)*sm1:size(1)))+j - res2[i][j] = sm1[i][j] % sm2[idx1d] - end - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - for j = 1, res1:size(2) do - err[i][j] = math.abs(res1[i][j] - res2[i][j]) - end - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - for j = 1, err:size(2) do - if err[i][j] > maxerr then - maxerr = err[i][j] - end - end - end - mytester:assertlt(maxerr, 
precision, 'error in torch.cremainder - contiguous') - - -- non-contiguous - local m1 = torch.Tensor(10, 10, 10):uniform(-10, 10) - local m2 = torch.Tensor(10 * 10, 10 * 10):uniform(-3, 3) - local sm1 = m1[{{}, 4, {}}] - local sm2 = m2[{{}, 4}] - local res1 = torch.cremainder(sm1, sm2) - local res2 = res1:clone():zero() - for i = 1,sm1:size(1) do - for j = 1, sm1:size(2) do - local idx1d = (((i-1)*sm1:size(1)))+j - res2[i][j] = sm1[i][j] % sm2[idx1d] - end - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - for j = 1, res1:size(2) do - err[i][j] = math.abs(res1[i][j] - res2[i][j]) - end - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - for j = 1, err:size(2) do - if err[i][j] > maxerr then - maxerr = err[i][j] - end - end - end - mytester:assertlt(maxerr, precision, 'error in torch.cremainder - non-contiguous') -end - -function torchtest.cmul() - local types = { - 'torch.ByteTensor', - 'torch.CharTensor', - 'torch.ShortTensor', - 'torch.IntTensor', - 'torch.FloatTensor', - 'torch.DoubleTensor', - 'torch.LongTensor', - } - - for k,t in ipairs(types) do - - -- [res] torch.cmul([res,] tensor1, tensor2) - -- contiguous - local m1 = torch.randn(10, 10, 10):type(t) - local m2 = torch.randn(10, 10 * 10):type(t) - local sm1 = m1[{4, {}, {}}] - local sm2 = m2[{4, {}}] - local res1 = torch.cmul(sm1, sm2) - local res2 = res1:clone():zero() - for i = 1,sm1:size(1) do - for j = 1, sm1:size(2) do - local idx1d = (((i-1)*sm1:size(1)))+j - res2[i][j] = sm1[i][j] * sm2[idx1d] - end - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - for j = 1, res1:size(2) do - err[i][j] = math.abs(res1[i][j] - res2[i][j]) - end - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - for j = 1, err:size(2) do - if err[i][j] > maxerr then - maxerr = err[i][j] - end - end - end - mytester:assertlt(maxerr, precision, 'error in torch.cmul - contiguous' .. ' ' .. t) - - -- non-contiguous - local m1 = torch.randn(10, 10, 10):type(t) - local m2 = torch.randn(10 * 10, 10 * 10):type(t) - local sm1 = m1[{{}, 4, {}}] - local sm2 = m2[{{}, 4}] - local res1 = torch.cmul(sm1, sm2) - local res2 = res1:clone():zero() - for i = 1,sm1:size(1) do - for j = 1, sm1:size(2) do - local idx1d = (((i-1)*sm1:size(1)))+j - res2[i][j] = sm1[i][j] * sm2[idx1d] - end - end - local err = res1:clone():zero() - -- find absolute error - for i = 1, res1:size(1) do - for j = 1, res1:size(2) do - err[i][j] = math.abs(res1[i][j] - res2[i][j]) - end - end - -- find maximum element of error - local maxerr = 0 - for i = 1, err:size(1) do - for j = 1, err:size(2) do - if err[i][j] > maxerr then - maxerr = err[i][j] - end - end - end - mytester:assertlt(maxerr, precision, 'error in torch.cmul - non-contiguous' .. ' ' .. 
t)
-   end
-end
-
-function torchtest.cpow() -- [res] torch.cpow([res,] tensor1, tensor2)
-   -- contiguous
-   local m1 = torch.rand(10, 10, 10)
-   local m2 = torch.rand(10, 10 * 10)
-   local sm1 = m1[{4, {}, {}}]
-   local sm2 = m2[{4, {}}]
-   local res1 = torch.cpow(sm1, sm2)
-   local res2 = res1:clone():zero()
-   for i = 1,sm1:size(1) do
-      for j = 1, sm1:size(2) do
-         local idx1d = (((i-1)*sm1:size(1)))+j
-         res2[i][j] = math.pow(sm1[i][j], sm2[idx1d])
-      end
-   end
-   local err = res1:clone():zero()
-   -- find absolute error
-   for i = 1, res1:size(1) do
-      for j = 1, res1:size(2) do
-         err[i][j] = math.abs(res1[i][j] - res2[i][j])
-      end
-   end
-   -- find maximum element of error
-   local maxerr = 0
-   for i = 1, err:size(1) do
-      for j = 1, err:size(2) do
-         if err[i][j] > maxerr then
-            maxerr = err[i][j]
-         end
-      end
-   end
-   mytester:assertlt(maxerr, precision, 'error in torch.cpow - contiguous')
-
-   -- non-contiguous
-   local m1 = torch.rand(10, 10, 10)
-   local m2 = torch.rand(10 * 10, 10 * 10)
-   local sm1 = m1[{{}, 4, {}}]
-   local sm2 = m2[{{}, 4}]
-   local res1 = torch.cpow(sm1, sm2)
-   local res2 = res1:clone():zero()
-   for i = 1,sm1:size(1) do
-      for j = 1, sm1:size(2) do
-         local idx1d = (((i-1)*sm1:size(1)))+j
-         res2[i][j] = math.pow(sm1[i][j],sm2[idx1d])
-      end
-   end
-   local err = res1:clone():zero()
-   -- find absolute error
-   for i = 1, res1:size(1) do
-      for j = 1, res1:size(2) do
-         err[i][j] = math.abs(res1[i][j] - res2[i][j])
-      end
-   end
-   -- find maximum element of error
-   local maxerr = 0
-   for i = 1, err:size(1) do
-      for j = 1, err:size(2) do
-         if err[i][j] > maxerr then
-            maxerr = err[i][j]
-         end
-      end
-   end
-   mytester:assertlt(maxerr, precision, 'error in torch.cpow - non-contiguous')
-end
-
-function torchtest.sum()
-   local x = torch.rand(msize,msize)
-   local mx = torch.sum(x,2)
-   local mxx = torch.Tensor()
-   torch.sum(mxx,x,2)
-   mytester:asserteq(maxdiff(mx,mxx),0,'torch.sum value')
-
-   local y = torch.rand(5, 5, 5)
-   for i=1,3 do
-      local a = y:sum(i)
-      local b = y:narrow(i, 1, 1):clone():zero()
-      for j = 1, 5 do
-         b:add(y:narrow(i, j, 1))
-      end
-      mytester:asserteq(maxdiff(a, b), 0, 'torch.sum value')
-   end
-end
-function torchtest.prod()
-   local x = torch.rand(msize,msize)
-   local mx = torch.prod(x,2)
-   local mxx = torch.Tensor()
-   torch.prod(mxx,x,2)
-   mytester:asserteq(maxdiff(mx,mxx),0,'torch.prod value')
-
-   local y = torch.rand(5, 5, 5)
-   for i=1,3 do
-      local a = y:prod(i)
-      local b = y:narrow(i, 1, 1):clone():fill(1)
-      for j = 1, 5 do
-         b:cmul(y:narrow(i, j, 1))
-      end
-      mytester:asserteq(maxdiff(a, b), 0, 'torch.prod value')
-   end
-end
-function torchtest.cumsum()
-   local x = torch.rand(msize,msize)
-   local mx = torch.cumsum(x,2)
-   local mxx = torch.Tensor()
-   torch.cumsum(mxx,x,2)
-   mytester:asserteq(maxdiff(mx,mxx),0,'torch.cumsum value')
-end
-function torchtest.cumprod()
-   local x = torch.rand(msize,msize)
-   local mx = torch.cumprod(x,2)
-   local mxx = torch.Tensor()
-   torch.cumprod(mxx,x,2)
-   mytester:asserteq(maxdiff(mx,mxx),0,'torch.cumprod value')
-end
-function torchtest.cross()
-   local x = torch.rand(msize,3,msize)
-   local y = torch.rand(msize,3,msize)
-   local mx = torch.cross(x,y)
-   local mxx = torch.Tensor()
-   torch.cross(mxx,x,y)
-   mytester:asserteq(maxdiff(mx,mxx),0,'torch.cross value')
-end
-function torchtest.zeros()
-   local mx = torch.zeros(msize,msize)
-   local mxx = torch.Tensor()
-   torch.zeros(mxx,msize,msize)
-   mytester:asserteq(maxdiff(mx,mxx),0,'torch.zeros value')
-end
-function torchtest.histc()
-   local x = torch.Tensor{ 2, 4, 2, 2, 5, 4 }
-   local y = torch.histc(x, 5,
1, 5) -- nbins, min, max - local z = torch.Tensor{ 0, 3, 0, 2, 1 } - mytester:assertTensorEq(y,z,precision,'error in torch.histc') -end -function torchtest.bhistc() - local x = torch.Tensor(3, 6) - x[1] = torch.Tensor{ 2, 4, 2, 2, 5, 4 } - x[2] = torch.Tensor{ 3, 5, 1, 5, 3, 5 } - x[3] = torch.Tensor{ 3, 4, 2, 5, 5, 1 } - local y = torch.bhistc(x, 5, 1, 5) -- nbins, min, max - local z = torch.Tensor(3, 5) - z[1] = torch.Tensor{ 0, 3, 0, 2, 1 } - z[2] = torch.Tensor{ 1, 0, 2, 0, 3 } - z[3] = torch.Tensor{ 1, 1, 1, 1, 2 } - mytester:assertTensorEq(y,z,precision,'error in torch.bhistc in last dimension') -end -function torchtest.ones() - local mx = torch.ones(msize,msize) - local mxx = torch.Tensor() - torch.ones(mxx,msize,msize) - mytester:asserteq(maxdiff(mx,mxx),0,'torch.ones value') -end -function torchtest.diag() - local x = torch.rand(msize,msize) - local mx = torch.diag(x) - local mxx = torch.Tensor() - torch.diag(mxx,x) - mytester:asserteq(maxdiff(mx,mxx),0,'torch.diag value') -end -function torchtest.eye() - local mx = torch.eye(msize,msize) - local mxx = torch.Tensor() - torch.eye(mxx,msize,msize) - mytester:asserteq(maxdiff(mx,mxx),0,'torch.eye value') -end -function torchtest.renorm() - local m1 = torch.randn(10,5) - local res1 = torch.Tensor() - local m2 - - local function renorm(matrix, value, dim, max_norm) - local m1 = matrix:transpose(dim, 1):contiguous() - -- collapse non-dim dimensions: - m2 = m1:reshape(m1:size(1), m1:nElement()/m1:size(1)) - local norms = m2:norm(value,2) - -- clip - local new_norms = norms:clone() - new_norms[torch.gt(norms, max_norm)] = max_norm - new_norms:cdiv(norms:add(1e-7)) - -- renormalize - m1:cmul(new_norms:expandAs(m1)) - return m1:transpose(dim, 1) - end - - -- note that the axis fed to torch.renorm is different (2~=1) - local maxnorm = m1:norm(2,1):mean() - m2 = renorm(m1,2,2,maxnorm) - - m1:renorm(2,2,maxnorm) - mytester:assertTensorEq(m1, m2, 0.00001, 'error in renorm') - mytester:assertTensorEq(m1:norm(2,1), m2:norm(2,1), 0.00001, 'error in renorm') - - m1 = torch.randn(3,4,5) - m2 = m1:transpose(2,3):contiguous():reshape(15,4) - - maxnorm = m2:norm(2,1):mean() - m2 = renorm(m2,2,2,maxnorm) - - m1:renorm(2,2,maxnorm) - local m3 = m1:transpose(2,3):contiguous():reshape(15,4) - mytester:assertTensorEq(m3, m2, 0.00001, 'error in renorm') - mytester:assertTensorEq(m3:norm(2,1), m2:norm(2,1), 0.00001, 'error in renorm') -end -function torchtest.multinomialwithreplacement() - local n_row = 3 - for n_col=4,5 do - local t=os.time() - torch.manualSeed(t) - local prob_dist = torch.rand(n_row,n_col) - prob_dist:select(2,n_col):fill(0) --index n_col shouldn't be sampled - local n_sample = n_col - local sample_indices = torch.multinomial(prob_dist, n_sample, true) - mytester:assert(prob_dist:dim() == 2, "wrong number of prob_dist dimensions") - mytester:assert(sample_indices:size(2) == n_sample, "wrong number of samples") - for i=1,n_row do - for j=1,n_sample do - mytester:assert(sample_indices[{i,j}] ~= n_col, "sampled an index with zero probability") - end - end - end -end -function torchtest.multinomialwithoutreplacement() - local n_row = 3 - for n_col=4,5 do - local t=os.time() - torch.manualSeed(t) - local prob_dist = torch.rand(n_row,n_col) - prob_dist:select(2,n_col):fill(0) --index n_col shouldn't be sampled - local n_sample = 3 - local sample_indices = torch.multinomial(prob_dist, n_sample, false) - mytester:assert(prob_dist:dim() == 2, "wrong number of prob_dist dimensions") - mytester:assert(sample_indices:size(2) == n_sample, "wrong number 
of samples") - for i=1,n_row do - local row_samples = {} - for j=1,n_sample do - local sample_idx = sample_indices[{i,j}] - mytester:assert( - sample_idx ~= n_col, "sampled an index with zero probability" - ) - mytester:assert( - not row_samples[sample_idx], "sampled an index twice" - ) - row_samples[sample_idx] = true - end - end - end -end -function torchtest.multinomialvector() - local n_col = 4 - local t=os.time() - torch.manualSeed(t) - local prob_dist = torch.rand(n_col) - local n_sample = n_col - local sample_indices = torch.multinomial(prob_dist, n_sample, true) - local s_dim = sample_indices:dim() - mytester:assert(s_dim == 1, "wrong number of returned dimensions: "..s_dim) - mytester:assert(prob_dist:dim() == 1, "wrong number of prob_dist dimensions") - mytester:assert(sample_indices:size(1) == n_sample, "wrong number of samples") -end -function torchtest.range() - local mx = torch.range(0,1) - local mxx = torch.Tensor() - torch.range(mxx,0,1) - mytester:asserteq(maxdiff(mx,mxx),0,'torch.range value') - - -- Check range for non-contiguous tensors. - local x = torch.zeros(2, 3) - local y = x:narrow(2, 2, 2) - y:range(0, 3) - mytester:assertTensorEq(x, torch.Tensor{{0, 0, 1}, {0, 2, 3}}, 1e-16, - 'non-contiguous range failed') -end -function torchtest.rangenegative() - local mx = torch.Tensor({1,0}) - local mxx = torch.Tensor() - torch.range(mxx,1,0,-1) - mytester:asserteq(maxdiff(mx,mxx),0,'torch.range value for negative step') -end -function torchtest.rangeequalbounds() - local mx = torch.Tensor({1}) - local mxx = torch.Tensor() - torch.range(mxx,1,1,-1) - mytester:asserteq(maxdiff(mx,mxx),0,'torch.range value for equal bounds step') - torch.range(mxx,1,1,1) - mytester:asserteq(maxdiff(mx,mxx),0,'torch.range value for equal bounds step') -end -function torchtest.rangefloat() - local mx = torch.FloatTensor():range(0.6, 0.9, 0.1) - mytester:asserteq(mx:size(1), 4, 'wrong size for FloatTensor range') - mx = torch.FloatTensor():range(1, 10, 0.3) - mytester:asserteq(mx:size(1), 31, 'wrong size for FloatTensor range') -end -function torchtest.rangedouble() - local mx = torch.DoubleTensor():range(0.6, 0.9, 0.1) - mytester:asserteq(mx:size(1), 4, 'wrong size for DoubleTensor range') - mx = torch.DoubleTensor():range(1, 10, 0.3) - mytester:asserteq(mx:size(1), 31, 'wrong size for DoubleTensor range') -end -function torchtest.randperm() - local t=os.time() - torch.manualSeed(t) - local mx = torch.randperm(msize) - local mxx = torch.Tensor() - torch.manualSeed(t) - torch.randperm(mxx,msize) - mytester:asserteq(maxdiff(mx,mxx),0,'torch.randperm value') -end -function torchtest.reshape() - local x = torch.rand(10,13,23) - local mx = torch.reshape(x,130,23) - local mxx = torch.Tensor() - torch.reshape(mxx,x,130,23) - mytester:asserteq(maxdiff(mx,mxx),0,'torch.reshape value') -end - -local function assertIsOrdered(order, x, mxx, ixx, task) - local areOrdered - if order == 'descending' then - areOrdered = function(a, b) return a >= b end - elseif order == 'ascending' then - areOrdered = function(a, b) return a <= b end - else - error('unknown order "' .. order .. '", must be "ascending" or "descending"') - end - - local decreasing = true - for j = 1,msize do - for k = 2,msize do - decreasing = decreasing and areOrdered(mxx[j][k-1], mxx[j][k]) - end - end - mytester:assert(decreasing, 'torch.sort (' .. order .. ') values unordered for ' .. 
task) - local seen = torch.ByteTensor(msize) - local indicesCorrect = true - for k = 1,msize do - seen:zero() - for j = 1,msize do - indicesCorrect = indicesCorrect and (x[k][ixx[k][j]] == mxx[k][j]) - seen[ixx[k][j]] = 1 - end - indicesCorrect = indicesCorrect and (torch.sum(seen) == msize) - end - mytester:assert(indicesCorrect, 'torch.sort (' .. order .. ') indices wrong for ' .. task) -end - -function torchtest.sortAscending() - local x = torch.rand(msize,msize) - local mx,ix = torch.sort(x) - - -- Test use of result tensor - local mxx = torch.Tensor() - local ixx = torch.LongTensor() - torch.sort(mxx,ixx,x) - mytester:asserteq(maxdiff(mx,mxx),0,'torch.sort (ascending) value') - mytester:asserteq(maxdiff(ix,ixx),0,'torch.sort (ascending) index') - - -- Test sorting of random numbers - assertIsOrdered('ascending', x, mxx, ixx, 'random') - - mytester:assertTensorEq( - torch.sort(torch.Tensor{ 50, 40, 30, 20, 10 }), - torch.Tensor{ 10, 20, 30, 40, 50 }, - 1e-16, - "torch.sort (ascending) simple sort" - ) - -- Test that we still have proper sorting with duplicate keys - local x = torch.floor(torch.rand(msize,msize)*10) - torch.sort(mxx,ixx,x) - assertIsOrdered('ascending', x, mxx, ixx, 'random with duplicate keys') -end - -function torchtest.sortDescending() - local x = torch.rand(msize,msize) - local mx,ix = torch.sort(x,true) - - -- Test use of result tensor - local mxx = torch.Tensor() - local ixx = torch.LongTensor() - torch.sort(mxx,ixx,x,true) - mytester:asserteq(maxdiff(mx,mxx),0,'torch.sort (descending) value') - mytester:asserteq(maxdiff(ix,ixx),0,'torch.sort (descending) index') - - -- Test sorting of random numbers - assertIsOrdered('descending', x, mxx, ixx, 'random') - - -- Test simple sort task - mytester:assertTensorEq( - torch.sort(torch.Tensor{ 10, 20, 30, 40, 50 },true), - torch.Tensor{ 50, 40, 30, 20, 10 }, - 1e-16, - "torch.sort (descending) simple sort" - ) - - -- Test that we still have proper sorting with duplicate keys - assertIsOrdered('descending', x, mxx, ixx, 'random with duplicate keys') -end - -function torchtest.topK() - local function topKViaSort(t, k, dim, dir) - local sorted, indices = t:sort(dim, dir) - return sorted:narrow(dim, 1, k), indices:narrow(dim, 1, k) - end - - local function compareTensors(t, res1, ind1, res2, ind2, dim, msg) - -- Values should be exactly equivalent - mytester:assertTensorEq(res1, res2, 0, msg) - - -- Indices might differ based on the implementation, since there is - -- no guarantee of the relative order of selection - if ind1:eq(ind2):min() == 0 then - -- To verify that the indices represent equivalent elements, - -- gather from the input using the topk indices and compare against - -- the sort indices - local vals = t:gather(dim, ind2) - mytester:assertTensorEq(res1, vals, 0, msg) - end - end - - local function compare(t, k, dim, dir, msg) - local topKVal, topKInd = t:topk(k, dim, dir, true) - local sortKVal, sortKInd = topKViaSort(t, k, dim, dir) - - compareTensors(t, sortKVal, sortKInd, topKVal, topKInd, dim, msg) - end - - local t = torch.rand(math.random(1, msize), - math.random(1, msize), - math.random(1, msize)) - - for kTries = 1, 3 do - for dimTries = 1, 3 do - for _, transpose in ipairs({true, false}) do - for _, dir in ipairs({true, false}) do - local testTensor = t - - local transposeMsg = nil - if transpose then - local dim1 = math.random(1, t:nDimension()) - local dim2 = dim1 - - while dim1 == dim2 do - dim2 = math.random(1, t:nDimension()) - end - - testTensor = t:transpose(dim1, dim2) - transposeMsg = 
'transpose(' .. dim1 .. ', ' .. dim2 .. ')' - end - - local dim = math.random(1, testTensor:nDimension()) - local k = math.random(1, testTensor:size(dim)) - local msg = 'topk(' .. k .. ', ' .. dim .. ', ' .. tostring(dir) .. ', true)' - if transposeMsg then - msg = msg .. ' ' .. transposeMsg - end - - compare(testTensor, k, dim, dir, msg) - end - end - end - end -end - -function torchtest.kthvalue() - local x = torch.rand(msize, msize, msize) - local x0 = x:clone() - do - local k = math.random(1, msize) - local mx, ix = torch.kthvalue(x, k) - local mxx, ixx = torch.sort(x) - - mytester:assertTensorEq(mxx:select(3, k), mx:select(3, 1), 0, - 'torch.kthvalue value') - mytester:assertTensorEq(ixx:select(3, k), ix:select(3, 1), 0, - 'torch.kthvalue index') - end - do -- test use of result tensors - local k = math.random(1, msize) - local mx = torch.Tensor() - local ix = torch.LongTensor() - torch.kthvalue(mx, ix, x, k) - local mxx, ixx = torch.sort(x) - mytester:assertTensorEq(mxx:select(3, k), mx:select(3, 1), 0, - 'torch.kthvalue value') - mytester:assertTensorEq(ixx:select(3, k), ix:select(3, 1), 0, - 'torch.kthvalue index') - end - do -- test non-default dim - local k = math.random(1, msize) - local mx, ix = torch.kthvalue(x, k, 1) - local mxx, ixx = torch.sort(x, 1) - mytester:assertTensorEq(mxx:select(1, k), mx[1], 0, - 'torch.kthvalue value') - mytester:assertTensorEq(ixx:select(1, k), ix[1], 0, - 'torch.kthvalue index') - end - do -- non-contiguous - local y = x:narrow(2, 1, 1) - local y0 = y:clone() - local k = math.random(1, msize) - local my, ix = torch.kthvalue(y, k) - local my0, ix0 = torch.kthvalue(y0, k) - mytester:assertTensorEq(my, my0, 0, 'torch.kthvalue value') - mytester:assertTensorEq(ix, ix0, 0, 'torch.kthvalue index') - end - mytester:assertTensorEq(x, x0, 0, 'torch.kthvalue modified input') - - -- simple test case (with repetitions) - local y = torch.Tensor{3,5,4,1,1,5} - mytester:assertTensorEq(torch.kthvalue(y, 3), torch.Tensor{3}, 1e-16, - 'torch.kthvalue simple') - mytester:assertTensorEq(torch.kthvalue(y, 2), torch.Tensor{1}, 1e-16, - 'torch.kthvalue simple') -end - -function torchtest.median() - for _, msize in ipairs{155,156} do - local x = torch.rand(msize, msize) - local x0 = x:clone() - - local mx, ix = torch.median(x) - local mxx, ixx = torch.sort(x) - local ind = math.floor((msize+1)/2) - - mytester:assertTensorEq(mxx:select(2, ind), mx:select(2, 1), 0, - 'torch.median value') - mytester:assertTensorEq(ixx:select(2, ind), ix:select(2, 1), 0, - 'torch.median index') - - -- Test use of result tensor - local mr = torch.Tensor() - local ir = torch.LongTensor() - torch.median(mr, ir, x) - mytester:assertTensorEq(mr, mx, 0, 'torch.median result tensor value') - mytester:assertTensorEq(ir, ix, 0, 'torch.median result tensor index') - - -- Test non-default dim - mx, ix = torch.median(x, 1) - mxx, ixx = torch.sort(x, 1) - mytester:assertTensorEq(mxx:select(1, ind), mx[1], 0, - 'torch.median value') - mytester:assertTensorEq(ixx:select(1, ind), ix[1], 0, - 'torch.median index') - - -- input unchanged - mytester:assertTensorEq(x, x0, 0, 'torch.median modified input') - end -end - -function torchtest.mode() - local x = torch.range(1, msize * msize):reshape(msize, msize) - x:select(1, 1):fill(1) - x:select(1, 2):fill(1) - x:select(2, 1):fill(1) - x:select(2, 2):fill(1) - local x0 = x:clone() - - -- Pre-calculated results. - local res = torch.Tensor(msize):fill(1) - -- The indices are the position of the last appearance of the mode element. 
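-   -- For example, with msize = 4 the constructed x is
-   --    1  1  1  1
-   --    1  1  1  1
-   --    1  1 11 12
-   --    1  1 15 16
-   -- so every row's mode is 1, last appearing at column 2 for rows 3..msize
-   -- and at column msize for the two all-ones rows (hence resix below).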
-   local resix = torch.LongTensor(msize):fill(2)
-   resix[1] = msize
-   resix[2] = msize
-
-   local mx, ix = torch.mode(x)
-
-   mytester:assertTensorEq(res:view(msize, 1), mx, 0, 'torch.mode value')
-   mytester:assertTensorEq(resix:view(msize, 1), ix, 0, 'torch.mode index')
-
-   -- Test use of result tensor
-   local mr = torch.Tensor()
-   local ir = torch.LongTensor()
-   torch.mode(mr, ir, x)
-   mytester:assertTensorEq(mr, mx, 0, 'torch.mode result tensor value')
-   mytester:assertTensorEq(ir, ix, 0, 'torch.mode result tensor index')
-
-   -- Test non-default dim
-   mx, ix = torch.mode(x, 1)
-   mytester:assertTensorEq(res:view(1, msize), mx, 0, 'torch.mode value')
-   mytester:assertTensorEq(resix:view(1, msize), ix, 0, 'torch.mode index')
-
-   local input = torch.Tensor({
-      {1, 2, 2, 2, 3, 2},
-      {1.5, 2, 2, 1.5, 1.5, 5},
-   })
-   local value, index = torch.mode(input)
-   local expected_value = torch.Tensor({{2}, {1.5}})
-   mytester:assertTensorEq(value, expected_value)
-
-   -- input unchanged
-   mytester:assertTensorEq(x, x0, 0, 'torch.mode modified input')
-end
-
-
-function torchtest.tril()
-   local x = torch.rand(msize,msize)
-   local mx = torch.tril(x)
-   local mxx = torch.Tensor()
-   torch.tril(mxx,x)
-   mytester:asserteq(maxdiff(mx,mxx),0,'torch.tril value')
-end
-function torchtest.triu()
-   local x = torch.rand(msize,msize)
-   local mx = torch.triu(x)
-   local mxx = torch.Tensor()
-   torch.triu(mxx,x)
-   mytester:asserteq(maxdiff(mx,mxx),0,'torch.triu value')
-end
-function torchtest.cat()
-   for dim = 1, 3 do
-      local x = torch.rand(13, msize, msize):transpose(1, dim)
-      local y = torch.rand(17, msize, msize):transpose(1, dim)
-      local mx = torch.cat(x, y, dim)
-      mytester:assertTensorEq(mx:narrow(dim, 1, 13), x, 0, 'torch.cat value')
-      mytester:assertTensorEq(mx:narrow(dim, 14, 17), y, 0, 'torch.cat value')
-
-      local mxx = torch.Tensor()
-      torch.cat(mxx, x, y, dim)
-      mytester:assertTensorEq(mx, mxx, 0, 'torch.cat value')
-
-      local x = torch.rand(1,2,3)
-      local y = torch.Tensor()
-      local mx = torch.cat(x,y,dim)
-      mytester:asserteq(mx:size(1),1,'torch.cat size')
-      mytester:asserteq(mx:size(2),2,'torch.cat size')
-      mytester:asserteq(mx:size(3),3,'torch.cat size')
-      mytester:assertTensorEq(mx, x, 0, 'torch.cat value')
-
-      local x = torch.Tensor()
-      local y = torch.Tensor()
-      local mx = torch.cat(x,y,dim)
-      mytester:asserteq(mx:dim(),0,'torch.cat dim')
-   end
-   local x = torch.Tensor()
-   local y = torch.rand(1,2,3)
-   local mx = torch.cat(x,y)
-   mytester:asserteq(mx:size(1),1,'torch.cat size')
-   mytester:asserteq(mx:size(2),2,'torch.cat size')
-   mytester:asserteq(mx:size(3),3,'torch.cat size')
-   mytester:assertTensorEq(mx, y, 0, 'torch.cat value')
-
-   local x = torch.Tensor()
-   local y = torch.Tensor()
-   local mx = torch.cat(x,y)
-   mytester:asserteq(mx:dim(),0,'torch.cat dim')
-end
-function torchtest.catArray()
-   for dim = 1, 3 do
-      local x = torch.rand(13, msize, msize):transpose(1, dim)
-      local y = torch.rand(17, msize, msize):transpose(1, dim)
-      local z = torch.rand(19, msize, msize):transpose(1, dim)
-
-      local mx = torch.cat({x, y, z}, dim)
-      mytester:assertTensorEq(mx:narrow(dim, 1, 13), x, 0, 'torch.cat value')
-      mytester:assertTensorEq(mx:narrow(dim, 14, 17), y, 0, 'torch.cat value')
-      mytester:assertTensorEq(mx:narrow(dim, 31, 19), z, 0, 'torch.cat value')
-
-      mytester:assertError(function() torch.cat{} end, 'torch.cat empty table')
-
-      local mxx = torch.Tensor()
-      torch.cat(mxx, {x, y, z}, dim)
-      mytester:assertTensorEq(mx, mxx, 0, 'torch.cat value')
-      torch.cat(mxx:float(), {x:float(), y:float(), z:float()}, dim)
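-      -- (the :float()/:double() cat calls around here write into fresh
-      -- temporaries returned by :float()/:double(), so they only exercise
-      -- torch.cat's handling of typed result tensors; the assertTensorEq
-      -- below still checks the original DoubleTensor mxx filled above)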
- mytester:assertTensorEq(mx, mxx, 0, 'torch.cat value')
- torch.cat(mxx:double(), {x:double(), y:double(), z:double()}, dim)
- mytester:assertTensorEq(mx, mxx, 0, 'torch.cat value')
-
- local x = torch.rand(1,2,3)
- local y = torch.Tensor()
- local mx = torch.cat({x,y},dim)
- mytester:asserteq(mx:size(1),1,'torch.cat size')
- mytester:asserteq(mx:size(2),2,'torch.cat size')
- mytester:asserteq(mx:size(3),3,'torch.cat size')
- mytester:assertTensorEq(mx, x, 0, 'torch.cat value')
-
- local x = torch.Tensor()
- local y = torch.Tensor()
- local mx = torch.cat({x,y},dim)
- mytester:asserteq(mx:dim(),0,'torch.cat dim')
- end
- local x = torch.Tensor()
- local y = torch.rand(1,2,3)
- local mx = torch.cat({x,y})
- mytester:asserteq(mx:size(1),1,'torch.cat size')
- mytester:asserteq(mx:size(2),2,'torch.cat size')
- mytester:asserteq(mx:size(3),3,'torch.cat size')
- mytester:assertTensorEq(mx, y, 0, 'torch.cat value')
-
- local x = torch.Tensor()
- local y = torch.Tensor()
- local mx = torch.cat({x,y})
- mytester:asserteq(mx:dim(),0,'torch.cat dim')
-end
-function torchtest.catNoDim()
- local a
- local b
- local c
-
- a = torch.Tensor(msize):uniform()
- b = torch.Tensor(msize):uniform()
- c = torch.cat(a, b)
- mytester:assertTensorEq(c:narrow(1, 1, msize), a, 0, 'torch.cat value')
- mytester:assertTensorEq(c:narrow(1, msize + 1, msize), b, 0, 'torch.cat value')
-
- a = torch.Tensor(1, msize):uniform()
- b = torch.Tensor(1, msize):uniform()
- c = torch.cat(a, b)
- mytester:assertTensorEq(c:narrow(2, 1, msize), a, 0, 'torch.cat value')
- mytester:assertTensorEq(c:narrow(2, msize + 1, msize), b, 0, 'torch.cat value')
-
- a = torch.Tensor(10, msize):uniform()
- b = torch.Tensor(10, msize):uniform()
- c = torch.cat(a, b)
- mytester:assertTensorEq(c:narrow(2, 1, msize), a, 0, 'torch.cat value')
- mytester:assertTensorEq(c:narrow(2, msize + 1, msize), b, 0, 'torch.cat value')
-end
-function torchtest.sin_2()
- local x = torch.rand(msize,msize,msize)
- local mx = torch.sin(x)
- local mxx = torch.Tensor()
- torch.sin(mxx,x)
- mytester:asserteq(maxdiff(mx,mxx),0,'torch.sin value')
-end
-function torchtest.linspace()
- local from = math.random()
- local to = from+math.random()
- local mx = torch.linspace(from,to,137)
- local mxx = torch.Tensor()
- torch.linspace(mxx,from,to,137)
- mytester:asserteq(maxdiff(mx,mxx),0,'torch.linspace value')
- mytester:assertError(function() torch.linspace(0,1,1) end, 'accepted 1 point between 2 distinct endpoints')
- mytester:assertTensorEq(torch.linspace(0,0,1),torch.zeros(1),1e-16, 'failed to generate for torch.linspace(0,0,1)')
-
- -- Check linspace for generating with start > end.
- mytester:assertTensorEq(torch.linspace(2,0,3),
- torch.Tensor{2,1,0},
- 1e-16,
- 'failed to generate for torch.linspace(2,0,3)')
-
- -- Check linspace for non-contiguous tensors.
- local x = torch.zeros(2, 3)
- local y = x:narrow(2, 2, 2)
- y:linspace(0, 3, 4)
- mytester:assertTensorEq(x, torch.Tensor{{0, 0, 1}, {0, 2, 3}}, 1e-16,
- 'non-contiguous linspace failed')
-end
-function torchtest.logspace()
- local from = math.random()
- local to = from+math.random()
- local mx = torch.logspace(from,to,137)
- local mxx = torch.Tensor()
- torch.logspace(mxx,from,to,137)
- mytester:asserteq(maxdiff(mx,mxx),0,'torch.logspace value')
- mytester:assertError(function() torch.logspace(0,1,1) end, 'accepted 1 point between 2 distinct endpoints')
- mytester:assertTensorEq(torch.logspace(0,0,1),torch.ones(1),1e-16, 'failed to generate for torch.logspace(0,0,1)')
-
- -- Check logspace for generating with start > end.
- mytester:assertTensorEq(torch.logspace(1,0,2),
- torch.Tensor{10, 1},
- 1e-16,
- 'failed to generate for torch.logspace(1,0,2)')
-
- -- Check logspace for non-contiguous tensors.
- local x = torch.zeros(2, 3)
- local y = x:narrow(2, 2, 2)
- y:logspace(0, 3, 4)
- mytester:assertTensorEq(x, torch.Tensor{{0, 1, 10}, {0, 100, 1000}}, 1e-16,
- 'non-contiguous logspace failed')
-end
-function torchtest.rand()
- torch.manualSeed(123456)
- local mx = torch.rand(msize,msize)
- local mxx = torch.Tensor()
- torch.manualSeed(123456)
- torch.rand(mxx,msize,msize)
- mytester:asserteq(maxdiff(mx,mxx),0,'torch.rand value')
-end
-function torchtest.randn()
- torch.manualSeed(123456)
- local mx = torch.randn(msize,msize)
- local mxx = torch.Tensor()
- torch.manualSeed(123456)
- torch.randn(mxx,msize,msize)
- mytester:asserteq(maxdiff(mx,mxx),0,'torch.randn value')
-end
-function torchtest.gesv()
- if not torch.gesv then return end
- local a=torch.Tensor({{6.80, -2.11, 5.66, 5.97, 8.23},
- {-6.05, -3.30, 5.36, -4.44, 1.08},
- {-0.45, 2.58, -2.70, 0.27, 9.04},
- {8.32, 2.71, 4.35, -7.17, 2.14},
- {-9.67, -5.14, -7.26, 6.08, -6.87}}):t()
- local b=torch.Tensor({{4.02, 6.19, -8.22, -7.57, -3.03},
- {-1.56, 4.00, -8.67, 1.75, 2.86},
- {9.81, -4.09, -4.57, -8.61, 8.99}}):t()
- local mx = torch.gesv(b,a)
- mytester:assertlt(b:dist(a*mx),1e-12,'torch.gesv')
- local ta = torch.Tensor()
- local tb = torch.Tensor()
- local mxx = torch.gesv(tb,ta,b,a)
- local mxxx = torch.gesv(b,a,b,a)
- mytester:asserteq(maxdiff(mx,tb),0,'torch.gesv value temp')
- mytester:asserteq(maxdiff(mx,b),0,'torch.gesv value flag')
- mytester:asserteq(maxdiff(mx,mxx),0,'torch.gesv value out1')
- mytester:asserteq(maxdiff(mx,mxxx),0,'torch.gesv value out2')
-end
-function torchtest.gesv_reuse()
- if not torch.gesv then return end
- local a=torch.Tensor({{6.80, -2.11, 5.66, 5.97, 8.23},
- {-6.05, -3.30, 5.36, -4.44, 1.08},
- {-0.45, 2.58, -2.70, 0.27, 9.04},
- {8.32, 2.71, 4.35, -7.17, 2.14},
- {-9.67, -5.14, -7.26, 6.08, -6.87}}):t()
- local b=torch.Tensor({{4.02, 6.19, -8.22, -7.57, -3.03},
- {-1.56, 4.00, -8.67, 1.75, 2.86},
- {9.81, -4.09, -4.57, -8.61, 8.99}}):t()
- local mx = torch.gesv(b,a)
- local ta = torch.Tensor()
- local tb = torch.Tensor()
- torch.gesv(tb,ta,b,a)
- mytester:asserteq(maxdiff(mx,tb),0,'torch.gesv value temp')
- torch.gesv(tb,ta,b,a)
- mytester:asserteq(maxdiff(mx,tb),0,'torch.gesv value reuse')
-end
-function torchtest.trtrs()
- if not torch.trtrs then return end
- local a=torch.Tensor({{6.80, -2.11, 5.66, 5.97, 8.23},
- {-6.05, -3.30, 5.36, -4.44, 1.08},
- {-0.45, 2.58, -2.70, 0.27, 9.04},
- {8.32, 2.71, 4.35, -7.17, 2.14},
- {-9.67, -5.14, -7.26, 6.08, -6.87}}):t()
- local b=torch.Tensor({{4.02, 6.19, -8.22, -7.57, -3.03},
- {-1.56, 4.00, -8.67, 1.75, 2.86},
- {9.81, -4.09, -4.57, -8.61,
8.99}}):t()
-
- local U = torch.triu(a)
- local L = torch.tril(a)
-
- -- solve Ux = b
- local x = torch.trtrs(b, U)
- mytester:assertlt(b:dist(U*x),1e-12,'torch.trtrs')
- x = torch.trtrs(b, U, 'U', 'N', 'N')
- mytester:assertlt(b:dist(U*x),1e-12,'torch.trtrs')
-
- -- solve Lx = b
- x = torch.trtrs(b, L, 'L')
- mytester:assertlt(b:dist(L*x),1e-12,'torch.trtrs')
- x = torch.trtrs(b, L, 'L', 'N', 'N')
- mytester:assertlt(b:dist(L*x),1e-12,'torch.trtrs')
-
- -- solve U'x = b
- x = torch.trtrs(b, U, 'U', 'T')
- mytester:assertlt(b:dist(U:t()*x),1e-12,'torch.trtrs')
- x = torch.trtrs(b, U, 'U', 'T', 'N')
- mytester:assertlt(b:dist(U:t()*x),1e-12,'torch.trtrs')
-
- -- solve U'x = b by manual transposition
- local y = torch.trtrs(b, U:t(), 'L', 'N')
- mytester:assertlt(x:dist(y),1e-12,'torch.trtrs')
-
- -- solve L'x = b
- x = torch.trtrs(b, L, 'L', 'T')
- mytester:assertlt(b:dist(L:t()*x),1e-12,'torch.trtrs')
- x = torch.trtrs(b, L, 'L', 'T', 'N')
- mytester:assertlt(b:dist(L:t()*x),1e-12,'torch.trtrs')
-
- -- solve L'x = b by manual transposition
- y = torch.trtrs(b, L:t(), 'U', 'N')
- mytester:assertlt(x:dist(y),1e-12,'torch.trtrs')
-end
-function torchtest.trtrs_reuse()
- if not torch.trtrs then return end
- local a=torch.Tensor({{6.80, -2.11, 5.66, 5.97, 8.23},
- {-6.05, -3.30, 5.36, -4.44, 1.08},
- {-0.45, 2.58, -2.70, 0.27, 9.04},
- {8.32, 2.71, 4.35, -7.17, 2.14},
- {-9.67, -5.14, -7.26, 6.08, -6.87}}):t()
- local b=torch.Tensor({{4.02, 6.19, -8.22, -7.57, -3.03},
- {-1.56, 4.00, -8.67, 1.75, 2.86},
- {9.81, -4.09, -4.57, -8.61, 8.99}}):t()
- local mx = torch.trtrs(b,a)
- local ta = torch.Tensor()
- local tb = torch.Tensor()
- torch.trtrs(tb,ta,b,a)
- mytester:asserteq(maxdiff(mx,tb),0,'torch.trtrs value temp')
- tb:zero()
- torch.trtrs(tb,ta,b,a)
- mytester:asserteq(maxdiff(mx,tb),0,'torch.trtrs value reuse')
-end
-function torchtest.gels_uniquely_determined()
- if not torch.gels then return end
- local expectedNorm = 0
- local a=torch.Tensor({{ 1.44, -9.96, -7.55, 8.34},
- {-7.84, -0.28, 3.24, 8.09},
- {-4.39, -3.24, 6.27, 5.28},
- {4.53, 3.83, -6.64, 2.06}}):t()
- local b=torch.Tensor({{8.58, 8.26, 8.48, -5.28},
- {9.35, -4.43, -0.70, -0.26}}):t()
- local a_copy = a:clone()
- local b_copy = b:clone()
- local mx = torch.gels(b,a)
- mytester:asserteq(maxdiff(a,a_copy),0,'torch.gels changed a')
- mytester:asserteq(maxdiff(b,b_copy),0,'torch.gels changed b')
- mytester:assertalmosteq((torch.mm(a,mx)-b):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer')
-
- local ta = torch.Tensor()
- local tb = torch.Tensor()
- local mxx = torch.gels(tb,ta,b,a)
- mytester:asserteq(maxdiff(a,a_copy),0,'torch.gels changed a')
- mytester:asserteq(maxdiff(b,b_copy),0,'torch.gels changed b')
- mytester:assertalmosteq((torch.mm(a,tb)-b):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer')
-
- local mxxx = torch.gels(b,a,b,a)
- mytester:assertalmosteq((torch.mm(a_copy,b)-b_copy):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer')
- mytester:asserteq(maxdiff(mx,tb),0,'torch.gels value temp')
- mytester:asserteq(maxdiff(mx,b),0,'torch.gels value flag')
- mytester:asserteq(maxdiff(mx,mxx),0,'torch.gels value out1')
- mytester:asserteq(maxdiff(mx,mxxx),0,'torch.gels value out2')
-end
-function torchtest.gels_reuse()
- if not torch.gels then return end
- local expectedNorm = 0
- local a=torch.Tensor({{ 1.44, -9.96, -7.55, 8.34},
- {-7.84, -0.28, 3.24, 8.09},
- {-4.39, -3.24, 6.27, 5.28},
- {4.53, 3.83, -6.64, 2.06}}):t()
- local b=torch.Tensor({{8.58, 8.26, 8.48, -5.28},
- {9.35, -4.43, -0.70, -0.26}}):t()
- local ta
= torch.Tensor() - local tb = torch.Tensor() - torch.gels(tb,ta,b,a) - mytester:assertalmosteq((torch.mm(a,tb)-b):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer') - torch.gels(tb,ta,b,a) - mytester:assertalmosteq((torch.mm(a,tb)-b):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer') - torch.gels(tb,ta,b,a) - mytester:assertalmosteq((torch.mm(a,tb)-b):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer') -end -function torchtest.gels_overdetermined() - if not torch.gels then return end - local expectedNorm = 17.390200628863 - local a=torch.Tensor({{ 1.44, -9.96, -7.55, 8.34, 7.08, -5.45}, - {-7.84, -0.28, 3.24, 8.09, 2.52, -5.70}, - {-4.39, -3.24, 6.27, 5.28, 0.74, -1.19}, - {4.53, 3.83, -6.64, 2.06, -2.47, 4.70}}):t() - local b=torch.Tensor({{8.58, 8.26, 8.48, -5.28, 5.72, 8.93}, - {9.35, -4.43, -0.70, -0.26, -7.36, -2.52}}):t() - local a_copy = a:clone() - local b_copy = b:clone() - local mx = torch.gels(b,a) - mytester:asserteq(maxdiff(a,a_copy),0,'torch.gels changed a') - mytester:asserteq(maxdiff(b,b_copy),0,'torch.gels changed b') - mytester:assertalmosteq((torch.mm(a, mx)-b):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer') - - local ta = torch.Tensor() - local tb = torch.Tensor() - local mxx = torch.gels(tb,ta,b,a) - mytester:asserteq(maxdiff(a,a_copy),0,'torch.gels changed a') - mytester:asserteq(maxdiff(b,b_copy),0,'torch.gels changed b') - mytester:assertalmosteq((torch.mm(a,tb)-b):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer') - - local mxxx = torch.gels(b,a,b,a) - mytester:assertalmosteq((torch.mm(a_copy,b)-b_copy):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer') - mytester:asserteq(maxdiff(mx,tb),0,'torch.gels value temp') - mytester:asserteq(maxdiff(mx,b),0,'torch.gels value flag') - mytester:asserteq(maxdiff(mx,mxx),0,'torch.gels value out1') - mytester:asserteq(maxdiff(mx,mxxx),0,'torch.gels value out2') -end -function torchtest.gels_underdetermined() - if not torch.gels then return end - local expectedNorm = 0 - local a=torch.Tensor({{ 1.44, -9.96, -7.55}, - {-7.84, -0.28, 3.24}, - {-4.39, -3.24, 6.27}, - {4.53, 3.83, -6.64}}):t() - local b=torch.Tensor({{8.58, 8.26, 8.48}, - {9.35, -4.43, -0.70}}):t() - - local a_copy = a:clone() - local b_copy = b:clone() - local mx = torch.gels(b,a) - mytester:asserteq(maxdiff(a,a_copy),0,'torch.gels changed a') - mytester:asserteq(maxdiff(b,b_copy),0,'torch.gels changed b') - mytester:assertalmosteq((torch.mm(a,mx)-b):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer') - - local ta = torch.Tensor() - local tb = torch.Tensor() - local mxx = torch.gels(tb,ta,b,a) - mytester:asserteq(maxdiff(a,a_copy),0,'torch.gels changed a') - mytester:asserteq(maxdiff(b,b_copy),0,'torch.gels changed b') - mytester:assertalmosteq((torch.mm(a,tb)-b):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer') - - local mxxx = torch.gels(b,a,b,a) - mytester:assertalmosteq((torch.mm(a_copy,b)-b_copy):norm(), expectedNorm, 1e-8, 'torch.gels wrong answer') - mytester:asserteq(maxdiff(mx,tb),0,'torch.gels value temp') - mytester:asserteq(maxdiff(mx,b),0,'torch.gels value flag') - mytester:asserteq(maxdiff(mx,mxx),0,'torch.gels value out1') - mytester:asserteq(maxdiff(mx,mxxx),0,'torch.gels value out2') -end -function torchtest.eig() - if not torch.eig then return end - local a=torch.Tensor({{ 1.96, 0.00, 0.00, 0.00, 0.00}, - {-6.49, 3.80, 0.00, 0.00, 0.00}, - {-0.47, -6.39, 4.17, 0.00, 0.00}, - {-7.20, 1.50, -1.51, 5.70, 0.00}, - {-0.65, -6.34, 2.67, 1.80, -7.10}}):t():clone() - local e = torch.eig(a) - local ee,vv = torch.eig(a,'V') 
- local te = torch.Tensor() - local tv = torch.Tensor() - local eee,vvv = torch.eig(te,tv,a,'V') - mytester:assertlt(maxdiff(e,ee),1e-12,'torch.eig value') - mytester:assertlt(maxdiff(ee,eee),1e-12,'torch.eig value') - mytester:assertlt(maxdiff(ee,te),1e-12,'torch.eig value') - mytester:assertlt(maxdiff(vv,vvv),1e-12,'torch.eig value') - mytester:assertlt(maxdiff(vv,tv),1e-12,'torch.eig value') -end -function torchtest.eig_reuse() - if not torch.eig then return end - local X = torch.randn(4,4) - X = X:t()*X - local e, v = torch.zeros(4,2), torch.zeros(4,4) - torch.eig(e, v, X,'V') - local Xhat = v * torch.diag(e:select(2,1)) * v:t() - mytester:assertTensorEq(X, Xhat, 1e-8, 'VeV\' wrong') - mytester:assert(not v:isContiguous(), 'V is contiguous') - - torch.eig(e, v, X, 'V') - local Xhat = torch.mm(v, torch.mm(e:select(2,1):diag(), v:t())) - mytester:assertTensorEq(X, Xhat, 1e-8, 'VeV\' wrong') - mytester:assert(not v:isContiguous(), 'V is contiguous') -end -function torchtest.eig_noncontig() - if not torch.eig then return end - local X = torch.randn(4,4) - X = X:t()*X - local e = torch.zeros(4,2,2)[{ {}, 2, {} }] - local v = torch.zeros(4,2,4)[{ {}, 2, {} }] - mytester:assert(not v:isContiguous(), 'V is contiguous') - mytester:assert(not e:isContiguous(), 'E is contiguous') - torch.eig(e, v, X,'V') - local Xhat = v * torch.diag(e:select(2,1)) * v:t() - mytester:assertTensorEq(X, Xhat, 1e-8, 'VeV\' wrong') -end -function torchtest.test_symeig() - if not torch.symeig then return end - local xval = torch.rand(100,3) - local cov = torch.mm(xval:t(), xval) - local rese = torch.zeros(3) - local resv = torch.zeros(3,3) - - -- First call to symeig - mytester:assert(resv:isContiguous(), 'resv is not contiguous') -- PASS - torch.symeig(rese, resv, cov:clone(), 'V') - local ahat = resv*torch.diag(rese)*resv:t() - mytester:assertTensorEq(cov, ahat, 1e-8, 'VeV\' wrong') -- PASS - - -- Second call to symeig - mytester:assert(not resv:isContiguous(), 'resv is contiguous') -- FAIL - torch.symeig(rese, resv, cov:clone(), 'V') - local ahat = torch.mm(torch.mm(resv, torch.diag(rese)), resv:t()) - mytester:assertTensorEq(cov, ahat, 1e-8, 'VeV\' wrong') -- FAIL -end -function torchtest.symeig_noncontig() - if not torch.symeig then return end - local X = torch.rand(5,5) - X = X:t()*X - local e = torch.zeros(4,2):select(2,2) - local v = torch.zeros(4,2,4)[{ {}, 2, {} }] - mytester:assert(not v:isContiguous(), 'V is contiguous') - mytester:assert(not e:isContiguous(), 'E is contiguous') - torch.symeig(e, v, X,'V') - local Xhat = v * torch.diag(e) * v:t() - mytester:assertTensorEq(X, Xhat, 1e-8, 'VeV\' wrong') -end -function torchtest.svd() - if not torch.svd then return end - local a=torch.Tensor({{8.79, 6.11, -9.15, 9.57, -3.49, 9.84}, - {9.93, 6.91, -7.93, 1.64, 4.02, 0.15}, - {9.83, 5.04, 4.86, 8.83, 9.80, -8.99}, - {5.45, -0.27, 4.85, 0.74, 10.00, -6.02}, - {3.16, 7.98, 3.01, 5.80, 4.27, -5.31}}):t():clone() - local u,s,v = torch.svd(a) - local uu = torch.Tensor() - local ss = torch.Tensor() - local vv = torch.Tensor() - local uuu,sss,vvv = torch.svd(uu,ss,vv,a) - mytester:asserteq(maxdiff(u,uu),0,'torch.svd') - mytester:asserteq(maxdiff(u,uuu),0,'torch.svd') - mytester:asserteq(maxdiff(s,ss),0,'torch.svd') - mytester:asserteq(maxdiff(s,sss),0,'torch.svd') - mytester:asserteq(maxdiff(v,vv),0,'torch.svd') - mytester:asserteq(maxdiff(v,vvv),0,'torch.svd') -end -function torchtest.svd_reuse() - if not torch.svd then return end - local X = torch.randn(4,4) - local U, S, V = torch.svd(X) - local Xhat = torch.mm(U, 
torch.mm(S:diag(), V:t())) - mytester:assertTensorEq(X, Xhat, 1e-8, 'USV\' wrong') - - mytester:assert(not U:isContiguous(), 'U is contiguous') - torch.svd(U, S, V, X) - local Xhat = torch.mm(U, torch.mm(S:diag(), V:t())) - mytester:assertTensorEq(X, Xhat, 1e-8, 'USV\' wrong') -end -function torchtest.svd_noncontig() - if not torch.svd then return end - local X = torch.randn(5,5) - local U = torch.zeros(5,2,5)[{ {}, 2, {} }] - local S = torch.zeros(5,2)[{ {}, 2 }] - local V = torch.zeros(5,2,5)[{ {}, 2, {} }] - - mytester:assert(not U:isContiguous(), 'U is contiguous') - mytester:assert(not S:isContiguous(), 'S is contiguous') - mytester:assert(not V:isContiguous(), 'V is contiguous') - torch.svd(U, S, V, X) - local Xhat = torch.mm(U, torch.mm(S:diag(), V:t())) - mytester:assertTensorEq(X, Xhat, 1e-8, 'USV\' wrong') -end -function torchtest.inverse() - if not torch.inverse then return end - local M = torch.randn(5,5) - local MI = torch.inverse(M) - local E = torch.eye(5) - mytester:assert(not MI:isContiguous(), 'MI is contiguous') - mytester:assertalmosteq(maxdiff(E,torch.mm(M,MI)), 0, 1e-8, 'inverse value') - mytester:assertalmosteq(maxdiff(E,torch.mm(MI,M)), 0, 1e-8, 'inverse value') - - local MII = torch.Tensor(5,5) - torch.inverse(MII, M) - mytester:assert(not MII:isContiguous(), 'MII is contiguous') - mytester:asserteq(maxdiff(MII, MI), 0, 'inverse value in-place') - -- second call, now that MII is transposed - torch.inverse(MII, M) - mytester:assert(not MII:isContiguous(), 'MII is contiguous') - mytester:asserteq(maxdiff(MII, MI), 0, 'inverse value in-place') -end -function torchtest.conv2() - local x = torch.rand(math.floor(torch.uniform(50,100)),math.floor(torch.uniform(50,100))) - local k = torch.rand(math.floor(torch.uniform(10,20)),math.floor(torch.uniform(10,20))) - local imvc = torch.conv2(x,k) - local imvc2 = torch.conv2(x,k,'V') - local imfc = torch.conv2(x,k,'F') - - local ki = k:clone(); - local ks = k:storage() - local kis = ki:storage() - for i=ks:size(),1,-1 do kis[ks:size()-i+1]=ks[i] end - local imvx = torch.xcorr2(x,ki) - local imvx2 = torch.xcorr2(x,ki,'V') - local imfx = torch.xcorr2(x,ki,'F') - - mytester:asserteq(maxdiff(imvc,imvc2),0,'torch.conv2') - mytester:asserteq(maxdiff(imvc,imvx),0,'torch.conv2') - mytester:asserteq(maxdiff(imvc,imvx2),0,'torch.conv2') - mytester:asserteq(maxdiff(imfc,imfx),0,'torch.conv2') - mytester:assertlt(math.abs(x:dot(x)-torch.xcorr2(x,x)[1][1]),1e-10,'torch.conv2') - - local xx = torch.Tensor(2,x:size(1),x:size(2)) - xx[1]:copy(x) - xx[2]:copy(x) - local kk = torch.Tensor(2,k:size(1),k:size(2)) - kk[1]:copy(k) - kk[2]:copy(k) - - local immvc = torch.conv2(xx,kk) - local immvc2 = torch.conv2(xx,kk,'V') - local immfc = torch.conv2(xx,kk,'F') - - mytester:asserteq(maxdiff(immvc[1],immvc[2]),0,'torch.conv2') - mytester:asserteq(maxdiff(immvc[1],imvc),0,'torch.conv2') - mytester:asserteq(maxdiff(immvc2[1],imvc2),0,'torch.conv2') - mytester:asserteq(maxdiff(immfc[1],immfc[2]),0,'torch.conv2') - mytester:asserteq(maxdiff(immfc[1],imfc),0,'torch.conv2') -end - -function torchtest.conv3() - local x = torch.rand(math.floor(torch.uniform(20,40)), - math.floor(torch.uniform(20,40)), - math.floor(torch.uniform(20,40))) - local k = torch.rand(math.floor(torch.uniform(5,10)), - math.floor(torch.uniform(5,10)), - math.floor(torch.uniform(5,10))) - local imvc = torch.conv3(x,k) - local imvc2 = torch.conv3(x,k,'V') - local imfc = torch.conv3(x,k,'F') - - local ki = k:clone(); - local ks = k:storage() - local kis = ki:storage() - for 
i=ks:size(),1,-1 do kis[ks:size()-i+1]=ks[i] end - local imvx = torch.xcorr3(x,ki) - local imvx2 = torch.xcorr3(x,ki,'V') - local imfx = torch.xcorr3(x,ki,'F') - - mytester:asserteq(maxdiff(imvc,imvc2),0,'torch.conv3') - mytester:asserteq(maxdiff(imvc,imvx),0,'torch.conv3') - mytester:asserteq(maxdiff(imvc,imvx2),0,'torch.conv3') - mytester:asserteq(maxdiff(imfc,imfx),0,'torch.conv3') - mytester:assertlt(math.abs(x:dot(x)-torch.xcorr3(x,x)[1][1][1]),4*1e-10,'torch.conv3') - - local xx = torch.Tensor(2,x:size(1),x:size(2),x:size(3)) - xx[1]:copy(x) - xx[2]:copy(x) - local kk = torch.Tensor(2,k:size(1),k:size(2),k:size(3)) - kk[1]:copy(k) - kk[2]:copy(k) - - local immvc = torch.conv3(xx,kk) - local immvc2 = torch.conv3(xx,kk,'V') - local immfc = torch.conv3(xx,kk,'F') - - mytester:asserteq(maxdiff(immvc[1],immvc[2]),0,'torch.conv3') - mytester:asserteq(maxdiff(immvc[1],imvc),0,'torch.conv3') - mytester:asserteq(maxdiff(immvc2[1],imvc2),0,'torch.conv3') - mytester:asserteq(maxdiff(immfc[1],immfc[2]),0,'torch.conv3') - mytester:asserteq(maxdiff(immfc[1],imfc),0,'torch.conv3') -end - -function torchtest.xcorr3_xcorr2_eq() - local ix = math.floor(torch.uniform(20,40)) - local iy = math.floor(torch.uniform(20,40)) - local iz = math.floor(torch.uniform(20,40)) - local kx = math.floor(torch.uniform(5,10)) - local ky = math.floor(torch.uniform(5,10)) - local kz = math.floor(torch.uniform(5,10)) - - local x = torch.rand(ix,iy,iz) - local k = torch.rand(kx,ky,kz) - - local o3 = torch.xcorr3(x,k) - local o32 = torch.zeros(o3:size()) - - for i=1,o3:size(1) do - for j=1,k:size(1) do - o32[i]:add(torch.xcorr2(x[i+j-1],k[j])) - end - end - - mytester:assertlt(maxdiff(o3,o32),precision,'torch.conv3_conv2_eq') -end - -function torchtest.fxcorr3_fxcorr2_eq() - local ix = math.floor(torch.uniform(20,40)) - local iy = math.floor(torch.uniform(20,40)) - local iz = math.floor(torch.uniform(20,40)) - local kx = math.floor(torch.uniform(5,10)) - local ky = math.floor(torch.uniform(5,10)) - local kz = math.floor(torch.uniform(5,10)) - - local x = torch.rand(ix,iy,iz) - local k = torch.rand(kx,ky,kz) - - local o3 = torch.xcorr3(x,k,'F') - - local o32 = torch.zeros(o3:size()) - - for i=1,x:size(1) do - for j=1,k:size(1) do - o32[i+j-1]:add(torch.xcorr2(x[i],k[k:size(1)-j + 1],'F')) - end - end - - mytester:assertlt(maxdiff(o3,o32),precision,'torch.conv3_conv2_eq') -end - -function torchtest.conv3_conv2_eq() - local ix = math.floor(torch.uniform(20,40)) - local iy = math.floor(torch.uniform(20,40)) - local iz = math.floor(torch.uniform(20,40)) - local kx = math.floor(torch.uniform(5,10)) - local ky = math.floor(torch.uniform(5,10)) - local kz = math.floor(torch.uniform(5,10)) - - local x = torch.rand(ix,iy,iz) - local k = torch.rand(kx,ky,kz) - - local o3 = torch.conv3(x,k) - local o32 = torch.zeros(o3:size()) - - for i=1,o3:size(1) do - for j=1,k:size(1) do - o32[i]:add(torch.conv2(x[i+j-1],k[k:size(1)-j+1])) - end - end - - mytester:assertlt(maxdiff(o3,o32),precision,'torch.conv3_conv2_eq') -end - -function torchtest.fconv3_fconv2_eq() - local ix = math.floor(torch.uniform(20,40)) - local iy = math.floor(torch.uniform(20,40)) - local iz = math.floor(torch.uniform(20,40)) - local kx = math.floor(torch.uniform(5,10)) - local ky = math.floor(torch.uniform(5,10)) - local kz = math.floor(torch.uniform(5,10)) - - local x = torch.rand(ix,iy,iz) - local k = torch.rand(kx,ky,kz) - - local o3 = torch.conv3(x,k,'F') - - local o32 = torch.zeros(o3:size()) - - for i=1,x:size(1) do - for j=1,k:size(1) do - 
o32[i+j-1]:add(torch.conv2(x[i],k[j],'F')) - end - end - - mytester:assertlt(maxdiff(o3,o32),precision,'torch.conv3_conv2_eq') -end - -function torchtest.logical() - local x = torch.rand(100,100)*2-1; - local xx = x:clone() - - local xgt = torch.gt(x,1) - local xlt = torch.lt(x,1) - - local xeq = torch.eq(x,1) - local xne = torch.ne(x,1) - - local neqs = xgt+xlt - local all = neqs + xeq - mytester:asserteq(neqs:sum(), xne:sum(), 'torch.logical') - mytester:asserteq(x:nElement(),all:double():sum() , 'torch.logical') -end - -function torchtest.RNGState() - local state = torch.getRNGState() - local stateCloned = state:clone() - local before = torch.rand(1000) - - mytester:assert(state:ne(stateCloned):long():sum() == 0, 'getRNGState should have value semantics, but appears to have reference semantics') - - torch.setRNGState(state) - local after = torch.rand(1000) - mytester:assertTensorEq(before, after, 1e-16, 'getRNGState/setRNGState not generating same sequence') -end - -function torchtest.RNGStateAliasing() - torch.manualSeed(1) - local unused = torch.uniform() - - -- Fork the random number stream at this point - local gen = torch.Generator() - torch.setRNGState(gen, torch.getRNGState()) - - local target_value = torch.rand(1000) - --Dramatically alter the internal state of the main generator - local also_unused = torch.rand(100000) - local forked_value = torch.rand(gen, 1000) - mytester:assertTensorEq(target_value, forked_value, 1e-16, "RNG has not forked correctly.") -end - -function torchtest.serializeGenerator() - local generator = torch.Generator() - torch.manualSeed(generator, 123) - local differentGenerator = torch.Generator() - torch.manualSeed(differentGenerator, 124) - local serializedGenerator = torch.serialize(generator) - local deserializedGenerator = torch.deserialize(serializedGenerator) - local generated = torch.random(generator) - local differentGenerated = torch.random(differentGenerator) - local deserializedGenerated = torch.random(deserializedGenerator) - mytester:asserteq(generated, deserializedGenerated, 'torch.Generator changed internal state after being serialized') - mytester:assertne(generated, differentGenerated, 'Generators with different random seed should not produce the same output') -end - -function torchtest.testBoxMullerState() - torch.manualSeed(123) - local odd_number = 101 - local seeded = torch.randn(odd_number) - local state = torch.getRNGState() - local midstream = torch.randn(odd_number) - torch.setRNGState(state) - local repeat_midstream = torch.randn(odd_number) - torch.manualSeed(123) - local reseeded = torch.randn(odd_number) - mytester:assertTensorEq(midstream, repeat_midstream, 1e-16, 'getRNGState/setRNGState not generating same sequence of normally distributed numbers') - mytester:assertTensorEq(seeded, reseeded, 1e-16, 'repeated calls to manualSeed not generating same sequence of normally distributed numbers') -end - -function torchtest.testCholesky() - local x = torch.rand(10,10) - local A = torch.mm(x, x:t()) - - ---- Default Case - local C = torch.potrf(A) - local B = torch.mm(C:t(), C) - mytester:assertTensorEq(A, B, 1e-14, 'potrf did not allow rebuilding the original matrix') - - ---- Test Upper Triangular - local U = torch.potrf(A, 'U') - B = torch.mm(U:t(), U) - mytester:assertTensorEq(A, B, 1e-14, 'potrf (upper) did not allow rebuilding the original matrix') - - ---- Test Lower Triangular - local L = torch.potrf(A, 'L') - B = torch.mm(L, L:t()) - mytester:assertTensorEq(A, B, 1e-14, 'potrf (lower) did not allow rebuilding the original 
matrix')
-end
-
-function torchtest.potrs()
- if not torch.potrs then return end
- local a=torch.Tensor({{6.80, -2.11, 5.66, 5.97, 8.23},
- {-6.05, -3.30, 5.36, -4.44, 1.08},
- {-0.45, 2.58, -2.70, 0.27, 9.04},
- {8.32, 2.71, 4.35, -7.17, 2.14},
- {-9.67, -5.14, -7.26, 6.08, -6.87}}):t()
- local b=torch.Tensor({{4.02, 6.19, -8.22, -7.57, -3.03},
- {-1.56, 4.00, -8.67, 1.75, 2.86},
- {9.81, -4.09, -4.57, -8.61, 8.99}}):t()
-
- ---- Make sure 'a' is symmetric PSD
- a = torch.mm(a, a:t())
-
- ---- Upper Triangular Test
- local U = torch.potrf(a, 'U')
- local x = torch.potrs(b, U, 'U')
- mytester:assertlt(b:dist(a*x),1e-12,"torch.potrs; uplo='U'")
-
- ---- Lower Triangular Test
- local L = torch.potrf(a, 'L')
- x = torch.potrs(b, L, 'L')
- mytester:assertlt(b:dist(a*x),1e-12,"torch.potrs; uplo='L'")
-end
-
-function torchtest.potri()
- if not torch.potri then return end
- local a=torch.Tensor({{6.80, -2.11, 5.66, 5.97, 8.23},
- {-6.05, -3.30, 5.36, -4.44, 1.08},
- {-0.45, 2.58, -2.70, 0.27, 9.04},
- {8.32, 2.71, 4.35, -7.17, 2.14},
- {-9.67, -5.14, -7.26, 6.08, -6.87}}):t()
-
- ---- Make sure 'a' is symmetric PSD
- a = torch.mm(a, a:t())
-
- ---- Compute inverse directly
- local inv0 = torch.inverse(a)
-
- ---- Default case
- local chol = torch.potrf(a)
- local inv1 = torch.potri(chol)
- mytester:assertlt(inv0:dist(inv1),1e-12,"torch.potri; uplo=''")
-
- ---- Upper Triangular Test
- chol = torch.potrf(a, 'U')
- inv1 = torch.potri(chol, 'U')
- mytester:assertlt(inv0:dist(inv1),1e-12,"torch.potri; uplo='U'")
-
- ---- Lower Triangular Test
- chol = torch.potrf(a, 'L')
- inv1 = torch.potri(chol, 'L')
- mytester:assertlt(inv0:dist(inv1),1e-12,"torch.potri; uplo='L'")
-end
-
-function torchtest.pstrf()
- local function checkPsdCholesky(a, uplo, inplace)
- local u, piv, args, a_reconstructed
- if inplace then
- u = torch.Tensor(a:size())
- piv = torch.IntTensor(a:size(1))
- args = {u, piv, a}
- else
- args = {a}
- end
-
- if uplo then table.insert(args, uplo) end
-
- u, piv = torch.pstrf(unpack(args))
-
- if uplo == 'L' then
- a_reconstructed = torch.mm(u, u:t())
- else
- a_reconstructed = torch.mm(u:t(), u)
- end
-
- piv = piv:long()
- local a_permuted = a:index(1, piv):index(2, piv)
- mytester:assertTensorEq(a_permuted, a_reconstructed, 1e-14,
- 'torch.pstrf did not allow rebuilding the original matrix;' ..
- 'uplo=' ..
tostring(uplo)) - end - - local dimensions = { {5, 1}, {5, 3}, {5, 5}, {10, 10} } - for _, dim in pairs(dimensions) do - local m = torch.Tensor(unpack(dim)):uniform() - local a = torch.mm(m, m:t()) - -- add a small number to the diagonal to make the matrix numerically positive semidefinite - for i = 1, m:size(1) do - a[i][i] = a[i][i] + 1e-7 - end - checkPsdCholesky(a, nil, false) - checkPsdCholesky(a, 'U', false) - checkPsdCholesky(a, 'L', false) - checkPsdCholesky(a, nil, true) - checkPsdCholesky(a, 'U', true) - checkPsdCholesky(a, 'L', true) - end -end - -function torchtest.testNumel() - local b = torch.ByteTensor(3, 100, 100) - mytester:asserteq(b:nElement(), 3*100*100, "nElement not right") - mytester:asserteq(b:numel(), 3*100*100, "numel not right") -end - - --- Generate a tensor of size `size` whose values are ascending integers from --- `start` (or 1, if `start is not given) -local function consecutive(size, start) - local sequence = torch.ones(torch.Tensor(size):prod(1)[1]):cumsum(1) - if start then - sequence:add(start - 1) - end - return sequence:resize(unpack(size)) -end - -function torchtest.index() - local badIndexMsg = "Lookup with valid index should return correct result" - local reference = consecutive{3, 3, 3} - mytester:assertTensorEq(reference[1], consecutive{3, 3}, 1e-16, badIndexMsg) - mytester:assertTensorEq(reference[2], consecutive({3, 3}, 10), 1e-16, badIndexMsg) - mytester:assertTensorEq(reference[3], consecutive({3, 3}, 19), 1e-16, badIndexMsg) - mytester:assertTensorEq(reference[{1}], consecutive{3, 3}, 1e-16, badIndexMsg) - mytester:assertTensorEq(reference[{2}], consecutive({3, 3}, 10), 1e-16, badIndexMsg) - mytester:assertTensorEq(reference[{3}], consecutive({3, 3}, 19), 1e-16, badIndexMsg) - mytester:assertTensorEq(reference[{1,2}], consecutive({3}, 4), 1e-16, badIndexMsg) - mytester:assertTensorEq(reference[{{1,2}}], consecutive({2, 3, 3}), 1e-16, badIndexMsg) - mytester:asserteq(reference[{3, 3, 3}], 27, badIndexMsg) - mytester:assertTensorEq(reference[{}], consecutive{3, 3, 3}, 1e-16, badIndexMsg) - - local shouldErrorMsg = "Lookup with too many indices should error" - mytester:assertError(function() return reference[{1, 1, 1, 1}] end, shouldErrorMsg) - mytester:assertError(function() return reference[{1, 1, 1, {1, 1}}] end, shouldErrorMsg) - mytester:assertError(function() return reference[{3, 3, 3, 3, 3, 3, 3, 3}] end, shouldErrorMsg) -end - -function torchtest.newIndex() - local badIndexMsg = "Assignment to valid index should produce correct result" - local reference = consecutive{3, 3, 3} - -- This relies on __index__() being correct - but we have separate tests for that - local function checkPartialAssign(index) - local reference = torch.zeros(3, 3, 3) - reference[index] = consecutive{3, 3, 3}[index] - mytester:assertTensorEq(reference[index], consecutive{3, 3, 3}[index], 1e-16, badIndexMsg) - reference[index] = 0 - mytester:assertTensorEq(reference, torch.zeros(3, 3, 3), 1e-16, badIndexMsg) - end - - checkPartialAssign{1} - checkPartialAssign{2} - checkPartialAssign{3} - checkPartialAssign{1,2} - checkPartialAssign{2,3} - checkPartialAssign{1,3} - checkPartialAssign{} - - local shouldErrorMsg = "Assignment with too many indices should error" - mytester:assertError(function() reference[{1, 1, 1, 1}] = 1 end, shouldErrorMsg) - mytester:assertError(function() reference[{1, 1, 1, {1, 1}}] = 1 end, shouldErrorMsg) - mytester:assertError(function() reference[{3, 3, 3, 3, 3, 3, 3, 3}] = 1 end, shouldErrorMsg) -end - -function torchtest.indexCopy() - local 
nCopy, nDest = 3, 20 - local dest = torch.randn(nDest,4,5) - local src = torch.randn(nCopy,4,5) - local idx = torch.randperm(nDest):narrow(1, 1, nCopy):long() - local dest2 = dest:clone() - dest:indexCopy(1, idx, src) - for i=1,idx:size(1) do - dest2[idx[i]]:copy(src[i]) - end - mytester:assertTensorEq(dest, dest2, 0.000001, "indexCopy tensor error") - - local dest = torch.randn(nDest) - local src = torch.randn(nCopy) - local idx = torch.randperm(nDest):narrow(1, 1, nCopy):long() - local dest2 = dest:clone() - dest:indexCopy(1, idx, src) - for i=1,idx:size(1) do - dest2[idx[i]] = src[i] - end - mytester:assertTensorEq(dest, dest2, 0.000001, "indexCopy scalar error") -end - -function torchtest.indexAdd() - local nCopy, nDest = 3, 20 - local dest = torch.randn(nDest,4,5) - local src = torch.randn(nCopy,4,5) - local idx = torch.randperm(nDest):narrow(1, 1, nCopy):long() - local dest2 = dest:clone() - dest:indexAdd(1, idx, src) - for i=1,idx:size(1) do - dest2[idx[i]]:add(src[i]) - end - mytester:assertTensorEq(dest, dest2, 0.000001, "indexAdd tensor error") - - local dest = torch.randn(nDest) - local src = torch.randn(nCopy) - local idx = torch.randperm(nDest):narrow(1, 1, nCopy):long() - local dest2 = dest:clone() - dest:indexAdd(1, idx, src) - for i=1,idx:size(1) do - dest2[idx[i]] = dest2[idx[i]] + src[i] - end - mytester:assertTensorEq(dest, dest2, 0.000001, "indexAdd scalar error") -end - --- Fill idx with valid indices. -local function fillIdx(idx, dim, dim_size, elems_per_row, m, n, o) - for i = 1, (dim == 1 and 1 or m) do - for j = 1, (dim == 2 and 1 or n) do - for k = 1, (dim == 3 and 1 or o) do - local ii = {i, j, k} - ii[dim] = {} - idx[ii] = torch.randperm(dim_size)[{{1, elems_per_row}}] - end - end - end -end - -function torchtest.gather() - local m, n, o = torch.random(10, 20), torch.random(10, 20), torch.random(10, 20) - local elems_per_row = torch.random(10) - local dim = torch.random(3) - - local src = torch.randn(m, n, o) - local idx_size = {m, n, o} - idx_size[dim] = elems_per_row - local idx = torch.LongTensor():resize(unpack(idx_size)) - fillIdx(idx, dim, src:size(dim), elems_per_row, m, n, o) - - local actual = torch.gather(src, dim, idx) - local expected = torch.Tensor():resize(unpack(idx_size)) - for i = 1, idx_size[1] do - for j = 1, idx_size[2] do - for k = 1, idx_size[3] do - local ii = {i, j, k} - ii[dim] = idx[i][j][k] - expected[i][j][k] = src[ii] - end - end - end - mytester:assertTensorEq(actual, expected, 0, "Wrong values for gather") - - idx[1][1][1] = 23 - mytester:assertError(function() torch.gather(src, dim, idx) end, - "Invalid index not detected") -end - -function torchtest.gatherMax() - local src = torch.randn(3, 4, 5) - local expected, idx = src:max(3) - local actual = torch.gather(src, 3, idx) - mytester:assertTensorEq(actual, expected, 0, "Wrong values for gather") -end - -function torchtest.scatter() - local m, n, o = torch.random(10, 20), torch.random(10, 20), torch.random(10, 20) - local elems_per_row = torch.random(10) - local dim = torch.random(3) - - local idx_size = {m, n, o} - idx_size[dim] = elems_per_row - local idx = torch.LongTensor():resize(unpack(idx_size)) - fillIdx(idx, dim, ({m, n, o})[dim], elems_per_row, m, n, o) - local src = torch.Tensor():resize(unpack(idx_size)):normal() - - local actual = torch.zeros(m, n, o):scatter(dim, idx, src) - local expected = torch.zeros(m, n, o) - for i = 1, idx_size[1] do - for j = 1, idx_size[2] do - for k = 1, idx_size[3] do - local ii = {i, j, k} - ii[dim] = idx[i][j][k] - expected[ii] = 
src[i][j][k] - end - end - end - mytester:assertTensorEq(actual, expected, 0, "Wrong values for scatter") - - idx[1][1][1] = 34 - mytester:assertError(function() torch.zeros(m, n, o):scatter(dim, idx, src) end, - "Invalid index not detected") -end - -function torchtest.scatterFill() - local m, n, o = torch.random(10, 20), torch.random(10, 20), torch.random(10, 20) - local elems_per_row = torch.random(10) - local dim = torch.random(3) - - local val = torch.uniform() - local idx_size = {m, n, o} - idx_size[dim] = elems_per_row - local idx = torch.LongTensor():resize(unpack(idx_size)) - fillIdx(idx, dim, ({m, n, o})[dim], elems_per_row, m, n, o) - - local actual = torch.zeros(m, n, o):scatter(dim, idx, val) - local expected = torch.zeros(m, n, o) - for i = 1, idx_size[1] do - for j = 1, idx_size[2] do - for k = 1, idx_size[3] do - local ii = {i, j, k} - ii[dim] = idx[i][j][k] - expected[ii] = val - end - end - end - mytester:assertTensorEq(actual, expected, 0, "Wrong values for scatter") - - idx[1][1][1] = 28 - mytester:assertError(function() torch.zeros(m, n, o):scatter(dim, idx, val) end, - "Invalid index not detected") -end - -function torchtest.maskedCopy() - local nCopy, nDest = 3, 10 - local dest = torch.randn(nDest) - local src = torch.randn(nCopy) - local mask = torch.ByteTensor{0,0,0,0,1,0,1,0,1,0} - local dest2 = dest:clone() - dest:maskedCopy(mask, src) - local j = 1 - for i=1,nDest do - if mask[i] == 1 then - dest2[i] = src[j] - j = j + 1 - end - end - mytester:assertTensorEq(dest, dest2, 0.000001, "maskedCopy error") - - -- make source bigger than number of 1s in mask - src = torch.randn(nDest) - local ok = pcall(dest.maskedCopy, dest, mask, src) - mytester:assert(ok, "maskedCopy incorrect complaint when" - .. " src is bigger than mask's one count") - - src = torch.randn(nCopy - 1) -- make src smaller. this should fail - local ok = pcall(dest.maskedCopy, dest, mask, src) - mytester:assert(not ok, "maskedCopy not erroring when" - .. 
" src is smaller than mask's one count") -end - -function torchtest.maskedSelect() - local nSrc = 10 - local src = torch.randn(nSrc) - local mask = torch.rand(nSrc):mul(2):floor():byte() - local dst = torch.Tensor() - dst:maskedSelect(src, mask) - local dst2 = {} - for i=1,nSrc do - if mask[i] == 1 then - table.insert(dst2, src[i]) - end - end - mytester:assertTensorEq(dst, torch.DoubleTensor(dst2), 0.000001, "maskedSelect error") -end - -function torchtest.maskedFill() - local nDst = 10 - local dst = torch.randn(nDst) - local mask = torch.rand(nDst):mul(2):floor():byte() - local val = math.random() - local dst2 = dst:clone() - dst:maskedFill(mask, val) - for i=1,nDst do - if mask[i] == 1 then - dst2[i] = val - end - end - mytester:assertTensorEq(dst, dst2, 0.000001, "maskedFill error") -end - -function torchtest.abs() - local size = 1000 - local range = 1000 - local original = torch.rand(size):mul(range) - -- Tensor filled with {-1,1} - local switch = torch.rand(size):mul(2):floor():mul(2):add(-1) - - local types = {'torch.DoubleTensor', 'torch.FloatTensor', 'torch.LongTensor', 'torch.IntTensor'} - for k,t in ipairs(types) do - local data = original:type(t) - local switch = switch:type(t) - local input = torch.cmul(data, switch) - mytester:assertTensorEq(input:abs(), data, 1e-16, 'Error in abs() for '..t) - end - - -- Checking that the right abs function is called for LongTensor - local bignumber - if torch.LongTensor():elementSize() > 4 then - bignumber = 2^31 + 1 - else - bignumber = 2^15 + 1 - end - local input = torch.LongTensor{-bignumber} - mytester:assertgt(input:abs()[1], 0, 'torch.abs(3)') -end - -function torchtest.classInModule() - -- Need a global for this module - _mymodule123 = {} - local x = torch.class('_mymodule123.myclass') - mytester:assert(x ~= nil, 'Could not create class in module') - -- Remove the global - _G['_mymodule123'] = nil - debug.getregistry()['_mymodule123.myclass']=nil -end - -function torchtest.classNoModule() - local x = torch.class('_myclass123') - mytester:assert(x ~= nil, 'Could not create class in module') - debug.getregistry()['_myclass123'] = nil -end - -function torchtest.type() - local objects = {torch.DoubleTensor(), {}, nil, 2, "asdf"} - local types = {'torch.DoubleTensor', 'table', 'nil', 'number', 'string'} - for i,obj in ipairs(objects) do - mytester:assert(torch.type(obj) == types[i], "wrong type "..types[i]) - end -end - -function torchtest.isTypeOfInheritance() - do - local A = torch.class('A') - local B, parB = torch.class('B', 'A') - local C, parC = torch.class('C', 'A') - end - local a, b, c = A(), B(), C() - - mytester:assert(torch.isTypeOf(a, 'A'), 'isTypeOf error, string spec') - mytester:assert(torch.isTypeOf(a, A), 'isTypeOf error, constructor') - mytester:assert(torch.isTypeOf(b, 'B'), 'isTypeOf error child class') - mytester:assert(torch.isTypeOf(b, B), 'isTypeOf error child class ctor') - mytester:assert(torch.isTypeOf(b, 'A'), 'isTypeOf error: inheritance') - mytester:assert(torch.isTypeOf(b, A), 'isTypeOf error: inheritance') - mytester:assert(not torch.isTypeOf(c, 'B'), 'isTypeOf error: common parent') - mytester:assert(not torch.isTypeOf(c, B), 'isTypeOf error: common parent') - debug.getregistry()['A'] = nil - debug.getregistry()['B'] = nil - debug.getregistry()['C'] = nil -end - -function torchtest.isTypeOfPartial() - do - local TorchDummy = torch.class('TorchDummy') - local OtherTorchDummy = torch.class('OtherTorchDummy') - local TorchMember = torch.class('TorchMember') - local OtherTorchMember = 
torch.class('OtherTorchMember') - local FirstTorchMember = torch.class('FirstTorchMember', - 'TorchMember') - local SecondTorchMember = torch.class('SecondTorchMember', - 'TorchMember') - local ThirdTorchMember = torch.class('ThirdTorchMember', - 'OtherTorchMember') - end - local td, otd = TorchDummy(), OtherTorchDummy() - local tm, ftm, stm, ttm = TorchMember(), FirstTorchMember(), - SecondTorchMember(), ThirdTorchMember() - - mytester:assert(not torch.isTypeOf(td, 'OtherTorchDummy'), - 'isTypeOf error: incorrect partial match') - mytester:assert(not torch.isTypeOf(otd, 'TorchDummy'), - 'isTypeOf error: incorrect partial match') - mytester:assert(torch.isTypeOf(tm, 'TorchMember'), - 'isTypeOf error, string spec') - mytester:assert(torch.isTypeOf(tm, TorchMember), - 'isTypeOf error, constructor') - mytester:assert(torch.isTypeOf(ftm, 'FirstTorchMember'), - 'isTypeOf error child class') - mytester:assert(torch.isTypeOf(ftm, FirstTorchMember), - 'isTypeOf error child class ctor') - mytester:assert(torch.isTypeOf(ftm, 'TorchMember'), - 'isTypeOf error: inheritance') - mytester:assert(torch.isTypeOf(ftm, TorchMember), - 'isTypeOf error: inheritance') - mytester:assert(not torch.isTypeOf(stm, 'FirstTorchMember'), - 'isTypeOf error: common parent') - mytester:assert(not torch.isTypeOf(stm, FirstTorchMember), - 'isTypeOf error: common parent') - mytester:assert(not torch.isTypeOf(ttm, TorchMember), - 'isTypeOf error: inheritance') - mytester:assert(not torch.isTypeOf(ttm, 'TorchMember'), - 'isTypeOf error: inheritance') - debug.getregistry()['TorchDummy'] = nil - debug.getregistry()['OtherTorchDummy'] = nil - debug.getregistry()['TorchMember'] = nil - debug.getregistry()['OtherTorchMember'] = nil - debug.getregistry()['FirstTorchMember'] = nil - debug.getregistry()['SecondTorchMember'] = nil - debug.getregistry()['ThirdTorchMember'] = nil -end - -function torchtest.isTypeOfPattern() - local t = torch.LongTensor() - mytester:assert(torch.isTypeOf(t, torch.LongTensor), - 'isTypeOf error: incorrect match') - mytester:assert(not torch.isTypeOf(t, torch.IntTensor), - 'isTypeOf error: incorrect match') - mytester:assert(torch.isTypeOf(t, 'torch.LongTensor'), - 'isTypeOf error: incorrect match') - mytester:assert(not torch.isTypeOf(t, 'torch.Long'), - 'isTypeOf error: incorrect match') - mytester:assert(torch.isTypeOf(t, 'torch.*Tensor'), - 'isTypeOf error: incorrect match') - mytester:assert(torch.isTypeOf(t, '.*Long'), - 'isTypeOf error: incorrect match') - mytester:assert(not torch.isTypeOf(t, 'torch.IntTensor'), - 'isTypeOf error: incorrect match') -end - -function torchtest.isTensor() - for k,v in ipairs({"real", "half"}) do - torchtest_isTensor(torch.getmetatable(torch.Tensor():type())[v]) - end -end - -function torchtest_isTensor(func) - local t = func(torch.randn(3,4)) - mytester:assert(torch.isTensor(t), 'error in isTensor') - mytester:assert(torch.isTensor(t[1]), 'error in isTensor for subTensor') - mytester:assert(not torch.isTensor(t[1][2]), 'false positive in isTensor') - mytester:assert(torch.Tensor.isTensor(t), 'alias not working') -end - -function torchtest.isStorage() - for k,v in ipairs({"real", "half"}) do - torchtest_isStorage(torch.getmetatable(torch.Tensor():type())[v]) - end -end - -function torchtest_isStorage(func) - local t = torch.randn(3,4) - mytester:assert(torch.isStorage(t:storage()), 'error in isStorage') - mytester:assert(not torch.isStorage(t), 'false positive in isStorage') -end - -function torchtest.view() - for k,v in ipairs({"real", "half"}) do - 
torchtest_view(torch.getmetatable(torch.Tensor():type())[v]) - end -end - -function torchtest_view(func) - local tensor = func(torch.rand(15)) - local template = func(torch.rand(3,5)) - local target = template:size():totable() - mytester:assertTableEq(tensor:viewAs(template):size():totable(), target, 'Error in viewAs') - mytester:assertTableEq(tensor:view(3,5):size():totable(), target, 'Error in view') - mytester:assertTableEq(tensor:view(torch.LongStorage{3,5}):size():totable(), target, 'Error in view using LongStorage') - mytester:assertTableEq(tensor:view(-1,5):size():totable(), target, 'Error in view using dimension -1') - mytester:assertTableEq(tensor:view(3,-1):size():totable(), target, 'Error in view using dimension -1') - local tensor_view = tensor:view(5,3) - tensor_view:fill(torch.rand(1)[1]) - mytester:asserteq((tensor_view-tensor):abs():max(), 0, 'Error in view') - - local target_tensor = func(torch.Tensor()) - mytester:assertTableEq(target_tensor:viewAs(tensor, template):size():totable(), target, 'Error in viewAs') - mytester:assertTableEq(target_tensor:view(tensor, 3,5):size():totable(), target, 'Error in view') - mytester:assertTableEq(target_tensor:view(tensor, torch.LongStorage{3,5}):size():totable(), target, 'Error in view using LongStorage') - mytester:assertTableEq(target_tensor:view(tensor, -1,5):size():totable(), target, 'Error in view using dimension -1') - mytester:assertTableEq(target_tensor:view(tensor, 3,-1):size():totable(), target, 'Error in view using dimension -1') - target_tensor:fill(torch.rand(1)[1]) - mytester:asserteq((target_tensor-tensor):abs():max(), 0, 'Error in viewAs') -end - -function torchtest.expand() - for k,v in ipairs({"real", "half"}) do - torchtest_expand(torch.getmetatable(torch.Tensor():type())[v]) - end -end - -function torchtest_expand(func) - local result = func(torch.Tensor()) - local tensor = func(torch.rand(8,1)) - local template = func(torch.rand(8,5)) - local target = template:size():totable() - mytester:assertTableEq(tensor:expandAs(template):size():totable(), target, 'Error in expandAs') - mytester:assertTableEq(tensor:expand(8,5):size():totable(), target, 'Error in expand') - mytester:assertTableEq(tensor:expand(torch.LongStorage{8,5}):size():totable(), target, 'Error in expand using LongStorage') - result:expandAs(tensor,template) - mytester:assertTableEq(result:size():totable(), target, 'Error in expandAs using result') - result:expand(tensor,8,5) - mytester:assertTableEq(result:size():totable(), target, 'Error in expand using result') - result:expand(tensor,torch.LongStorage{8,5}) - mytester:assertTableEq(result:size():totable(), target, 'Error in expand using result and LongStorage') - mytester:asserteq((result:mean(2):view(8,1)-tensor):abs():max(), 0, 'Error in expand (not equal)') -end - -function torchtest.repeatTensor() - for k,v in ipairs({"real", "half"}) do - torchtest_repeatTensor(torch.getmetatable(torch.Tensor():type())[v]) - end -end - -function torchtest_repeatTensor(func, mean) - local result = func(torch.Tensor()) - local tensor = func(torch.rand(8,4)) - local size = {3,1,1} - local sizeStorage = torch.LongStorage(size) - local target = {3,8,4} - mytester:assertTableEq(tensor:repeatTensor(unpack(size)):size():totable(), target, 'Error in repeatTensor') - mytester:assertTableEq(tensor:repeatTensor(sizeStorage):size():totable(), target, 'Error in repeatTensor using LongStorage') - result:repeatTensor(tensor,unpack(size)) - mytester:assertTableEq(result:size():totable(), target, 'Error in repeatTensor using 
result') - result:repeatTensor(tensor,sizeStorage) - mytester:assertTableEq(result:size():totable(), target, 'Error in repeatTensor using result and LongStorage') - mytester:asserteq((result:mean(1):view(8,4)-tensor):abs():max(), 0, 'Error in repeatTensor (not equal)') -end - -function torchtest.isSameSizeAs() - for k,v in ipairs({"real", "half"}) do - torchtest_isSameSizeAs(torch.getmetatable(torch.Tensor():type())[v]) - end -end - -function torchtest_isSameSizeAs(func) - local t1 = func(torch.Tensor(3, 4, 9, 10)) - local t2 = func(torch.Tensor(3, 4)) - local t3 = func(torch.Tensor(1, 9, 3, 3)) - local t4 = func(torch.Tensor(3, 4, 9, 10)) - - mytester:assert(t1:isSameSizeAs(t2) == false, "wrong answer ") - mytester:assert(t1:isSameSizeAs(t3) == false, "wrong answer ") - mytester:assert(t1:isSameSizeAs(t4) == true, "wrong answer ") -end - -function torchtest.isSetTo() - for k,v in ipairs({"real", "half"}) do - torchtest_isSetTo(torch.getmetatable(torch.Tensor():type())[v]) - end -end - -function torchtest_isSetTo(func) - local t1 = func(torch.Tensor(3, 4, 9, 10)) - local t2 = func(torch.Tensor(3, 4, 9, 10)) - local t3 = func(torch.Tensor()):set(t1) - local t4 = t3:reshape(12, 90) - mytester:assert(t1:isSetTo(t2) == false, "tensors do not share storage") - mytester:assert(t1:isSetTo(t3) == true, "tensor is set to other") - mytester:assert(t3:isSetTo(t1) == true, "isSetTo should be symmetric") - mytester:assert(t1:isSetTo(t4) == false, "tensors have different view") - mytester:assert(not func(torch.Tensor()):isSetTo(func(torch.Tensor())), - "Tensors with no storages should not appear to be set " .. - "to each other") -end - -function torchtest.equal() - -- Contiguous, 1D - local t1 = torch.Tensor{3, 4, 9, 10} - local t2 = t1:clone() - local t3 = torch.Tensor{1, 9, 3, 10} - local t4 = torch.Tensor{3, 4, 9} - local t5 = torch.Tensor() - mytester:assert(t1:equal(t2) == true, "wrong answer ") - mytester:assert(t1:equal(t3) == false, "wrong answer ") - mytester:assert(t1:equal(t4) == false, "wrong answer ") - mytester:assert(t1:equal(t5) == false, "wrong answer ") - mytester:assert(torch.equal(t1, t2) == true, "wrong answer ") - mytester:assert(torch.equal(t1, t3) == false, "wrong answer ") - mytester:assert(torch.equal(t1, t4) == false, "wrong answer ") - mytester:assert(torch.equal(t1, t5) == false, "wrong answer ") - - -- Non contiguous, 2D - local s = torch.Tensor({{1, 2, 3, 4}, {5, 6, 7, 8}}) - local s1 = s[{{}, {2, 3}}] - local s2 = s1:clone() - local s3 = torch.Tensor({{2, 3}, {6, 7}}) - local s4 = torch.Tensor({{0, 0}, {0, 0}}) - - mytester:assert(not s1:isContiguous(), "wrong answer ") - mytester:assert(s1:equal(s2) == true, "wrong answer ") - mytester:assert(s1:equal(s3) == true, "wrong answer ") - mytester:assert(s1:equal(s4) == false, "wrong answer ") - mytester:assert(torch.equal(s1, s2) == true, "wrong answer ") - mytester:assert(torch.equal(s1, s3) == true, "wrong answer ") - mytester:assert(torch.equal(s1, s4) == false, "wrong answer ") -end - -function torchtest.isSize() - for k,v in ipairs({"real", "half"}) do - torchtest_isSize(torch.getmetatable(torch.Tensor():type())[v]) - end -end - -function torchtest_isSize(func) - local t1 = func(torch.Tensor(3, 4, 5)) - local s1 = torch.LongStorage({3, 4, 5}) - local s2 = torch.LongStorage({5, 4, 3}) - - mytester:assert(t1:isSize(s1) == true, "wrong answer ") - mytester:assert(t1:isSize(s2) == false, "wrong answer ") - mytester:assert(t1:isSize(t1:size()) == true, "wrong answer ") -end - -function torchtest.elementSize() - local byte = 
torch.ByteStorage():elementSize() - local char = torch.CharStorage():elementSize() - local short = torch.ShortStorage():elementSize() - local int = torch.IntStorage():elementSize() - local long = torch.LongStorage():elementSize() - local float = torch.FloatStorage():elementSize() - local double = torch.DoubleStorage():elementSize() - local half = torch.HalfStorage():elementSize() - - mytester:asserteq(byte, torch.ByteTensor():elementSize()) - mytester:asserteq(char, torch.CharTensor():elementSize()) - mytester:asserteq(short, torch.ShortTensor():elementSize()) - mytester:asserteq(int, torch.IntTensor():elementSize()) - mytester:asserteq(long, torch.LongTensor():elementSize()) - mytester:asserteq(float, torch.FloatTensor():elementSize()) - mytester:asserteq(double, torch.DoubleTensor():elementSize()) - mytester:asserteq(half, torch.HalfTensor():elementSize()) - - mytester:assertne(byte, 0) - mytester:assertne(char, 0) - mytester:assertne(short, 0) - mytester:assertne(int, 0) - mytester:assertne(long, 0) - mytester:assertne(float, 0) - mytester:assertne(double, 0) - mytester:assertne(half, 0) - - -- These tests are portable, not necessarily strict for your system. - mytester:asserteq(byte, 1) - mytester:asserteq(char, 1) - mytester:assert(short >= 2) - mytester:assert(int >= 2) - mytester:assert(int >= short) - mytester:assert(long >= 4) - mytester:assert(long >= int) - mytester:assert(double >= float) - mytester:assert(half <= float) -end - -function torchtest.split() - for k,v in ipairs({"real", "half"}) do - torchtest_split(torch.getmetatable(torch.Tensor():type())[v]) - end -end - -function torchtest_split(func) - local result = {} - local tensor = func(torch.rand(7,4)) - local splitSize = 3 - local targetSize = {{3,4},{3,4},{1,4}} - local dim = 1 - local splits = tensor:split(splitSize, dim) - local start = 1 - for i, split in ipairs(splits) do - mytester:assertTableEq(split:size():totable(), targetSize[i], 'Size error in split '..i) - mytester:assertTensorEq(tensor:narrow(dim, start, targetSize[i][dim]), split, 0.00001, 'Content error in split '..i) - start = start + targetSize[i][dim] - end - torch.split(result, tensor, splitSize, dim) - local start = 1 - for i, split in ipairs(result) do - mytester:assertTableEq(split:size():totable(), targetSize[i], 'Result size error in split '..i) - mytester:assertTensorEq(tensor:narrow(dim, start, targetSize[i][dim]), split, 0.000001, 'Result content error in split '..i) - start = start + targetSize[i][dim] - end - mytester:asserteq(#splits, #result, 'Non-consistent output size from split') - for i, split in ipairs(splits) do - mytester:assertTensorEq(split,result[i], 0, 'Non-consistent outputs from split') - end -end - -function torchtest.chunk() - for k,v in ipairs({"real", "half"}) do - torchtest_chunk(torch.getmetatable(torch.Tensor():type())[v]) - end -end - -function torchtest_chunk(func) - local result = {} - local tensor = func(torch.rand(4,7)) - local nChunk = 3 - local targetSize = {{4,3},{4,3},{4,1}} - local dim = 2 - local splits = tensor:chunk(nChunk, dim) - local start = 1 - for i, split in ipairs(splits) do - mytester:assertTableEq(split:size():totable(), targetSize[i], 'Size error in chunk '..i) - mytester:assertTensorEq(tensor:narrow(dim, start, targetSize[i][dim]), split, 0.00001, 'Content error in chunk '..i) - start = start + targetSize[i][dim] - end - torch.split(result, tensor, nChunk, dim) - local start = 1 - for i, split in ipairs(result) do - mytester:assertTableEq(split:size():totable(), targetSize[i], 'Result size error 
in chunk '..i)
- mytester:assertTensorEq(tensor:narrow(dim, start, targetSize[i][dim]), split, 0.000001, 'Result content error in chunk '..i)
- start = start + targetSize[i][dim]
- end
-end
-
-function torchtest.table()
- local convStorage = {
- ['real'] = 'FloatStorage',
- ['half'] = 'HalfStorage'
- }
- for k,v in pairs(convStorage) do
- torchtest_totable(torch.getmetatable(torch.Tensor():type())[k], v)
- end
-end
-
-function torchtest_totable(func, storageType)
- local table0D = {}
- local tensor0D = func(torch.Tensor(table0D))
- mytester:assertTableEq(torch.totable(tensor0D), table0D, 'tensor0D:totable incorrect')
-
- local table1D = {1, 2, 3}
- local tensor1D = func(torch.Tensor(table1D))
- local storage = torch[storageType](table1D)
- mytester:assertTableEq(tensor1D:totable(), table1D, 'tensor1D:totable incorrect')
- mytester:assertTableEq(storage:totable(), table1D, 'storage:totable incorrect')
- mytester:assertTableEq(torch.totable(tensor1D), table1D, 'torch.totable incorrect for Tensors')
- mytester:assertTableEq(torch.totable(storage), table1D, 'torch.totable incorrect for Storages')
-
- local table2D = {{1, 2}, {3, 4}}
- local tensor2D = func(torch.Tensor(table2D))
- mytester:assertTableEq(tensor2D:totable(), table2D, 'tensor2D:totable incorrect')
-
- local tensor3D = func(torch.Tensor({{{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}}))
- local tensorNonContig = tensor3D:select(2, 2)
- mytester:assert(not tensorNonContig:isContiguous(), 'invalid test')
- mytester:assertTableEq(tensorNonContig:totable(), {{3, 4}, {7, 8}},
- 'totable() incorrect for non-contiguous tensors')
-end
-
-function torchtest.permute()
- for k,v in ipairs({"real", "half"}) do
- torchtest_permute(torch.getmetatable(torch.Tensor():type())[v])
- end
-end
-
-function torchtest_permute(func)
- local orig = {1,2,3,4,5,6,7}
- local perm = torch.randperm(7):totable()
- local x = torch.Tensor(unpack(orig)):fill(0)
- local new = x:permute(unpack(perm)):size():totable()
- mytester:assertTableEq(perm, new, 'Tensor:permute incorrect')
- mytester:assertTableEq(x:size():totable(), orig, 'Tensor:permute changes tensor')
-end
-
-function torchtest.serialize()
- local tableObj = {6, a = 42}
- local tensObj = torch.randn(3,4,5)
-
- -- Test serializing a table
- local serString = torch.serialize(tableObj)
- local serStorage = torch.serializeToStorage(tableObj)
- mytester:assertTableEq(tableObj, torch.deserialize(serString))
- mytester:assertTableEq(tableObj, torch.deserializeFromStorage(serStorage))
-
- -- Test serializing a Tensor
- serString = torch.serialize(tensObj)
- serStorage = torch.serializeToStorage(tensObj)
- mytester:assertTensorEq(tensObj, torch.deserialize(serString), 1e-10)
- mytester:assertTensorEq(tensObj, torch.deserializeFromStorage(serStorage), 1e-10)
-end
-
-function torchtest.storageview()
- local s1 = torch.LongStorage({3, 4, 5})
- local s2 = torch.LongStorage(s1, 2)
-
- mytester:assert(s2:size() == 2, "should be size 2")
- mytester:assert(s2[1] == s1[2], "should have 4 at position 1")
- mytester:assert(s2[2] == s1[3], "should have 5 at position 2")
-
- s2[1] = 13
- mytester:assert(13 == s1[2], "should have 13 at position 1")
-end
-
-function torchtest.nonzero()
- local nSrc = 12
-
- local types = {
- 'torch.ByteTensor',
- 'torch.CharTensor',
- 'torch.ShortTensor',
- 'torch.IntTensor',
- 'torch.FloatTensor',
- 'torch.DoubleTensor',
- 'torch.LongTensor',
- }
-
- local shapes = {
- torch.LongStorage{12},
- torch.LongStorage{12, 1},
- torch.LongStorage{1, 12},
- torch.LongStorage{6, 2},
- torch.LongStorage{3, 2, 2},
-
} - - for _, type in ipairs(types) do - local tensor = torch.rand(nSrc):mul(2):floor():type(type) - for _, shape in ipairs(shapes) do - tensor = tensor:reshape(shape) - local dst1 = torch.nonzero(tensor) - local dst2 = tensor:nonzero() - -- Does not work. Torch uses the first argument to determine what - -- type the Tensor is expected to be. In our case the second argument - -- determines the type of Tensor. - --local dst3 = torch.LongTensor() - --torch.nonzero(dst3, tensor) - -- However, there are workarounds to this issue when it is desired to - -- use an existing tensor for the result: - local dst4 = torch.LongTensor() - tensor.nonzero(dst4, tensor) - if shape:size() == 1 then - local dst = {} - for i = 1 , nSrc do - if tensor[i] ~= 0 then - table.insert(dst, i) - end - end - mytester:assertTensorEq(dst1:select(2, 1), torch.LongTensor(dst), 0.0, - "nonzero error") - mytester:assertTensorEq(dst2:select(2, 1), torch.LongTensor(dst), 0.0, - "nonzero error") - --mytester:assertTensorEq(dst3:select(2, 1), torch.LongTensor(dst), - -- 0.0, "nonzero error") - mytester:assertTensorEq(dst4:select(2, 1), torch.LongTensor(dst), 0.0, - "nonzero error") - elseif shape:size() == 2 then - -- This test will allow through some false positives. It only checks - -- that the elements flagged positive are indeed non-zero. - for i=1,dst1:size()[1] do - mytester:assert(tensor[dst1[i][1]][dst1[i][2]] ~= 0) - end - elseif shape:size() == 3 then - -- This test will allow through some false positives. It only checks - -- that the elements flagged positive are indeed non-zero. - for i=1,dst1:size()[1] do - mytester:assert(tensor[dst1[i][1]][dst1[i][2]][dst1[i][3]] ~= 0) - end - end - end - end - -end - -function torchtest.testheaptracking() - local oldheaptracking = torch._heaptracking - if oldheaptracking == nil then - oldheaptracking = false - end - torch.setheaptracking(true) - mytester:assert(torch._heaptracking == true, 'Heap tracking expected true') - - torch.setheaptracking(false) - mytester:assert(torch._heaptracking == false, 'Heap tracking expected false') - - -- put heap tracking to its original state - torch.setheaptracking(oldheaptracking) -end - -function torchtest.bernoulli() - local size = torch.LongStorage{10, 10} - local t = torch.ByteTensor(size) - - local function isBinary(t) - return torch.ne(t, 0):cmul(torch.ne(t, 1)):sum() == 0 - end - - local p = 0.5 - t:bernoulli(p) - mytester:assert(isBinary(t), 'Sample from torch.bernoulli is not binary') - - local p = torch.rand(size) - t:bernoulli(p) - mytester:assert(isBinary(t), 'Sample from torch.bernoulli is not binary') -end - -function torchtest.logNormal() - local t = torch.FloatTensor(10, 10) - local mean, std = torch.uniform(), 0.1 * torch.uniform() - local tolerance = 0.02 - - t:logNormal(mean, std) - local logt = t:log() - mytester:assertalmosteq(logt:mean(), mean, tolerance, 'mean is wrong') - mytester:assertalmosteq(logt:std(), std, tolerance, 'tolerance is wrong') -end - -function torch.test(tests) - torch.setheaptracking(true) - math.randomseed(os.time()) - if torch.getdefaulttensortype() == 'torch.FloatTensor' then - precision = 1e-4 - elseif torch.getdefaulttensortype() == 'torch.DoubleTensor' then - precision = 1e-8 - end - mytester = torch.Tester() - mytester:add(torchtest) - mytester:run(tests) - return mytester -end diff --git a/contrib/lua-torch/torch7/test/test_Multinomial.lua b/contrib/lua-torch/torch7/test/test_Multinomial.lua deleted file mode 100644 index 9069ecbedd..0000000000 --- 
a/contrib/lua-torch/torch7/test/test_Multinomial.lua +++ /dev/null @@ -1,25 +0,0 @@ --- Test multinomial for rare events (based on https://github.com/torch/torch7/issues/418) --- and for performance (cf. https://github.com/torch/torch7/issues/453) - -sys.tic() -do - local p = torch.FloatTensor(1001000):fill(1) - p:narrow(1, 50001, 50000):fill(1e-3) - p:div(p:sum()) - local N = 1001000 - - local n = 0 - local c = torch.LongTensor(p:nElement()):zero() - local c_ptr = c:data() - 1 - local tmp = torch.LongTensor() - for i = 1, 100 do - p.multinomial(tmp, p, N, true); - n = n + N - tmp:apply(function(i) c_ptr[i] = c_ptr[i] + 1 end) - end - - local actual = c:narrow(1, 50001, 50000):sum() - local expected = n*p:narrow(1, 50001, 50000):sum() - print('Actual, Expected: ', actual, expected) -end -print('Time spent: ', sys.toc()) diff --git a/contrib/lua-torch/torch7/test/test_Tester.lua b/contrib/lua-torch/torch7/test/test_Tester.lua deleted file mode 100644 index a283360889..0000000000 --- a/contrib/lua-torch/torch7/test/test_Tester.lua +++ /dev/null @@ -1,626 +0,0 @@ -require 'torch' - -local tester = torch.Tester() - -local MESSAGE = "a really useful informative error message" - -local subtester = torch.Tester() --- The message only interests us in case of failure -subtester._success = function(self) return true, MESSAGE end -subtester._failure = function(self, message) return false, message end - -local tests = torch.TestSuite() - -local test_name_passed_to_setUp -local calls_to_setUp = 0 -local calls_to_tearDown = 0 - -local originalIoWrite = io.write -local function disableIoWrite() - io.write = function() end -end -local function enableIoWrite() - io.write = originalIoWrite -end - -local function meta_assert_success(success, message) - tester:assert(success == true, "assert wasn't successful") - tester:assert(string.find(message, MESSAGE) ~= nil, "message doesn't match") -end -local function meta_assert_failure(success, message) - tester:assert(success == false, "assert didn't fail") - tester:assert(string.find(message, MESSAGE) ~= nil, "message doesn't match") -end - -function tests.really_test_assert() - assert((subtester:assert(true, MESSAGE)), - "subtester:assert doesn't actually work!") - assert(not (subtester:assert(false, MESSAGE)), - "subtester:assert doesn't actually work!") -end - -function tests.setEarlyAbort() - disableIoWrite() - - for _, earlyAbort in ipairs{false, true} do - local myTester = torch.Tester() - - local invokedCount = 0 - local myTests = {} - function myTests.t1() - invokedCount = invokedCount + 1 - myTester:assert(false) - end - myTests.t2 = myTests.t1 - - myTester:setEarlyAbort(earlyAbort) - myTester:add(myTests) - pcall(myTester.run, myTester) - - tester:assert(invokedCount == (earlyAbort and 1 or 2), - "wrong number of tests invoked for use with earlyAbort") - end - - enableIoWrite() -end - -function tests.setRethrowErrors() - disableIoWrite() - - local myTester = torch.Tester() - myTester:setRethrowErrors(true) - myTester:add(function() error("a throw") end) - - tester:assertErrorPattern(function() myTester:run() end, - "a throw", - "error should be rethrown") - - enableIoWrite() -end - -function tests.disable() - disableIoWrite() - - for disableCount = 1, 2 do - local myTester = torch.Tester() - local tests = {} - local test1Invoked = false - local test2Invoked = false - function tests.test1() - test1Invoked = true - end - function tests.test2() - test2Invoked = true - end - myTester:add(tests) - - if disableCount == 1 then - myTester:disable('test1'):run() 
- tester:assert((not test1Invoked) and test2Invoked, - "disabled test shouldn't have been invoked") - else - myTester:disable({'test1', 'test2'}):run() - tester:assert((not test1Invoked) and (not test2Invoked), - "disabled tests shouldn't have been invoked") - end - end - - enableIoWrite() -end - -function tests.assert() - meta_assert_success(subtester:assert(true, MESSAGE)) - meta_assert_failure(subtester:assert(false, MESSAGE)) -end - -local function testEqNe(eqExpected, ...) - if eqExpected then - meta_assert_success(subtester:eq(...)) - meta_assert_failure(subtester:ne(...)) - else - meta_assert_failure(subtester:eq(...)) - meta_assert_success(subtester:ne(...)) - end -end - ---[[ Test :assertGeneralEq and :assertGeneralNe (also known as :eq and :ne). - -Note that in-depth testing of testing of many specific types of data (such as -Tensor) is covered below, when we test specific functions (such as -:assertTensorEq). This just does a general check, as well as testing of testing -of mixed datatypes. -]] -function tests.assertGeneral() - local one = torch.Tensor{1} - - testEqNe(true, one, one, MESSAGE) - testEqNe(false, one, 1, MESSAGE) - testEqNe(true, "hi", "hi", MESSAGE) - testEqNe(true, {one, 1}, {one, 1}, MESSAGE) - testEqNe(true, {{{one}}}, {{{one}}}, MESSAGE) - testEqNe(false, {{{one}}}, {{one}}, MESSAGE) - testEqNe(true, torch.Storage{1}, torch.Storage{1}, MESSAGE) - testEqNe(false, torch.FloatStorage{1}, torch.LongStorage{1}, MESSAGE) - testEqNe(false, torch.Storage{1}, torch.Storage{1, 2}, MESSAGE) - testEqNe(false, "one", 1, MESSAGE) - testEqNe(false, {one}, {one + torch.Tensor{1e-10}}, MESSAGE) - testEqNe(true, {one}, {one + torch.Tensor{1e-10}}, 1e-9, MESSAGE) -end - -function tests.assertlt() - meta_assert_success(subtester:assertlt(1, 2, MESSAGE)) - meta_assert_failure(subtester:assertlt(2, 1, MESSAGE)) - meta_assert_failure(subtester:assertlt(1, 1, MESSAGE)) -end - -function tests.assertgt() - meta_assert_success(subtester:assertgt(2, 1, MESSAGE)) - meta_assert_failure(subtester:assertgt(1, 2, MESSAGE)) - meta_assert_failure(subtester:assertgt(1, 1, MESSAGE)) -end - -function tests.assertle() - meta_assert_success(subtester:assertle(1, 2, MESSAGE)) - meta_assert_failure(subtester:assertle(2, 1, MESSAGE)) - meta_assert_success(subtester:assertle(1, 1, MESSAGE)) -end - -function tests.assertge() - meta_assert_success(subtester:assertge(2, 1, MESSAGE)) - meta_assert_failure(subtester:assertge(1, 2, MESSAGE)) - meta_assert_success(subtester:assertge(1, 1, MESSAGE)) -end - -function tests.asserteq() - meta_assert_success(subtester:asserteq(1, 1, MESSAGE)) - meta_assert_failure(subtester:asserteq(1, 2, MESSAGE)) -end - -function tests.assertalmosteq() - meta_assert_success(subtester:assertalmosteq(1, 1, MESSAGE)) - meta_assert_success(subtester:assertalmosteq(1, 1 + 1e-17, MESSAGE)) - meta_assert_success(subtester:assertalmosteq(1, 2, 2, MESSAGE)) - meta_assert_failure(subtester:assertalmosteq(1, 2, MESSAGE)) - meta_assert_failure(subtester:assertalmosteq(1, 3, 1, MESSAGE)) -end - -function tests.assertne() - meta_assert_success(subtester:assertne(1, 2, MESSAGE)) - meta_assert_failure(subtester:assertne(1, 1, MESSAGE)) -end - --- The `alsoTestEq` flag is provided to test :eq in addition to :assertTensorEq. --- The behaviour of the two isn't always the same due to handling of tensors of --- different dimensions but the same number of elements. -local function testTensorEqNe(eqExpected, alsoTestEq, ...) 
- if eqExpected then - meta_assert_success(subtester:assertTensorEq(...)) - meta_assert_failure(subtester:assertTensorNe(...)) - if alsoTestEq then - meta_assert_success(subtester:eq(...)) - meta_assert_failure(subtester:ne(...)) - end - else - meta_assert_failure(subtester:assertTensorEq(...)) - meta_assert_success(subtester:assertTensorNe(...)) - if alsoTestEq then - meta_assert_failure(subtester:eq(...)) - meta_assert_success(subtester:ne(...)) - end - end -end - -function tests.assertTensor_types() - local allTypes = { - torch.ByteTensor, - torch.CharTensor, - torch.ShortTensor, - torch.IntTensor, - torch.LongTensor, - torch.FloatTensor, - torch.DoubleTensor, - } - for _, tensor1 in ipairs(allTypes) do - for _, tensor2 in ipairs(allTypes) do - local t1 = tensor1():ones(10) - local t2 = tensor2():ones(10) - testTensorEqNe(tensor1 == tensor2, true, t1, t2, 1e-6, MESSAGE) - end - end - - testTensorEqNe(false, true, torch.FloatTensor(), torch.LongTensor(), MESSAGE) -end - -function tests.assertTensor_sizes() - local t = torch.Tensor() -- no dimensions - local t2 = torch.ones(2) - local t3 = torch.ones(3) - local t12 = torch.ones(1, 2) - assert(subtester._assertTensorEqIgnoresDims == true) -- default state - testTensorEqNe(false, false, t, t2, 1e-6, MESSAGE) - testTensorEqNe(false, false, t, t3, 1e-6, MESSAGE) - testTensorEqNe(false, false, t, t12, 1e-6, MESSAGE) - testTensorEqNe(false, false, t2, t3, 1e-6, MESSAGE) - testTensorEqNe(true, false, t2, t12, 1e-6, MESSAGE) - testTensorEqNe(false, false, t3, t12, 1e-6, MESSAGE) - subtester._assertTensorEqIgnoresDims = false - testTensorEqNe(false, true, t, t2, 1e-6, MESSAGE) - testTensorEqNe(false, true, t, t3, 1e-6, MESSAGE) - testTensorEqNe(false, true, t, t12, 1e-6, MESSAGE) - testTensorEqNe(false, true, t2, t3, 1e-6, MESSAGE) - testTensorEqNe(false, true, t2, t12, 1e-6, MESSAGE) - testTensorEqNe(false, true, t3, t12, 1e-6, MESSAGE) - subtester._assertTensorEqIgnoresDims = true -- reset back -end - -function tests.assertTensor_epsilon() - local t1 = torch.rand(100, 100) - local t2 = torch.rand(100, 100) * 1e-5 - local t3 = t1 + t2 - testTensorEqNe(true, true, t1, t3, 1e-4, MESSAGE) - testTensorEqNe(false, true, t1, t3, 1e-6, MESSAGE) -end - -function tests.assertTensor_arg() - local one = torch.Tensor{1} - - tester:assertErrorPattern( - function() subtester:assertTensorEq(one, 2) end, - "Second argument should be a Tensor") - - -- Test that assertTensorEq support message and tolerance in either ordering - tester:assertNoError( - function() subtester:assertTensorEq(one, one, 0.1, MESSAGE) end) - tester:assertNoError( - function() subtester:assertTensorEq(one, one, MESSAGE, 0.1) end) -end - -function tests.assertTensor() - local t1 = torch.randn(100, 100) - local t2 = t1:clone() - local t3 = torch.randn(100, 100) - testTensorEqNe(true, true, t1, t2, 1e-6, MESSAGE) - testTensorEqNe(false, true, t1, t3, 1e-6, MESSAGE) - testTensorEqNe(true, true, torch.Tensor(), torch.Tensor(), MESSAGE) -end - --- Check that calling assertTensorEq with two tensors with the same content but --- different dimensions gives a warning. 
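-- For reference, the behaviour exercised below (a minimal sketch, assuming a
-- stock torch7 install): the two tensors hold the same single value but have
-- shapes {1,1} and {1}, so the assertion passes on content while printing a
-- warning containing "but different dimensions":
--   local t = torch.Tester()
--   t:assertTensorEq(torch.Tensor{{1}}, torch.Tensor{1}, 1e-6)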
-function tests.assertTensorDimWarning() - local myTester = torch.Tester() - myTester:add( - function() - myTester:assertTensorEq(torch.Tensor{{1}}, torch.Tensor{1}) - end) - - local warningGiven = false - io.write = function(s) - if string.match(s, 'but different dimensions') then - warningGiven = true - end - end - - myTester:run() - enableIoWrite() - - tester:assert(warningGiven, - "Calling :assertTensorEq({{1}}, {1}) should give a warning") -end - -local function testTableEqNe(eqExpected, ...) - if eqExpected then - meta_assert_success(subtester:assertTableEq(...)) - meta_assert_failure(subtester:assertTableNe(...)) - meta_assert_success(subtester:eq(...)) - meta_assert_failure(subtester:ne(...)) - else - meta_assert_failure(subtester:assertTableEq(...)) - meta_assert_success(subtester:assertTableNe(...)) - meta_assert_failure(subtester:eq(...)) - meta_assert_success(subtester:ne(...)) - end -end - -function tests.assertTable() - testTableEqNe(true, {1, 2, 3}, {1, 2, 3}, MESSAGE) - testTableEqNe(false, {1, 2, 3}, {3, 2, 1}, MESSAGE) - testTableEqNe(true, {1, 2, {4, 5}}, {1, 2, {4, 5}}, MESSAGE) - testTableEqNe(false, {1, 2, 3}, {1,2}, MESSAGE) - testTableEqNe(false, {1, 2, 3}, {1, 2, 3, 4}, MESSAGE) - testTableEqNe(true, {{1}}, {{1}}, MESSAGE) - testTableEqNe(false, {{1}}, {{{1}}}, MESSAGE) - testTableEqNe(true, {false}, {false}, MESSAGE) - testTableEqNe(false, {true}, {false}, MESSAGE) - testTableEqNe(false, {false}, {true}, MESSAGE) - - local tensor = torch.rand(100, 100) - local t1 = {1, "a", key = "value", tensor = tensor, subtable = {"nested"}} - local t2 = {1, "a", key = "value", tensor = tensor, subtable = {"nested"}} - testTableEqNe(true, t1, t2, MESSAGE) - for k, v in pairs(t1) do - local x = "something else" - t2[k] = nil - t2[x] = v - testTableEqNe(false, t1, t2, MESSAGE) - t2[x] = nil - t2[k] = x - testTableEqNe(false, t1, t2, MESSAGE) - t2[k] = v - testTableEqNe(true, t1, t2, MESSAGE) - end -end - -local function good_fn() end -local function bad_fn() error("muahaha!") end - -function tests.assertError() - meta_assert_success(subtester:assertError(bad_fn, MESSAGE)) - meta_assert_failure(subtester:assertError(good_fn, MESSAGE)) -end - -function tests.assertNoError() - meta_assert_success(subtester:assertNoError(good_fn, MESSAGE)) - meta_assert_failure(subtester:assertNoError(bad_fn, MESSAGE)) -end - -function tests.assertErrorPattern() - meta_assert_success(subtester:assertErrorPattern(bad_fn, "haha", MESSAGE)) - meta_assert_failure(subtester:assertErrorPattern(bad_fn, "hehe", MESSAGE)) -end - -function tests.testSuite_duplicateTests() - local function createDuplicateTests() - local tests = torch.TestSuite() - function tests.testThis() end - function tests.testThis() end - end - tester:assertErrorPattern(createDuplicateTests, - "Test testThis is already defined.") -end - ---[[ Returns a Tester with `numSuccess` success cases, `numFailure` failure - cases, and with an error if `hasError` is true. 
- Success and fail tests are evaluated with tester:eq -]] -local function genDummyTest(numSuccess, numFailure, hasError) - hasError = hasError or false - - local dummyTester = torch.Tester() - local dummyTests = torch.TestSuite() - - if numSuccess > 0 then - function dummyTests.testDummySuccess() - for i = 1, numSuccess do - dummyTester:eq({1}, {1}, '', 0) - end - end - end - - if numFailure > 0 then - function dummyTests.testDummyFailure() - for i = 1, numFailure do - dummyTester:eq({1}, {2}, '', 0) - end - end - end - - if hasError then - function dummyTests.testDummyError() - error('dummy error') - end - end - - return dummyTester:add(dummyTests) -end - -function tests.runStatusAndAssertCounts() - local emptyTest = genDummyTest(0, 0, false) - local sucTest = genDummyTest(1, 0, false) - local multSucTest = genDummyTest(4, 0, false) - local failTest = genDummyTest(0, 1, false) - local errTest = genDummyTest(0, 0, true) - local errFailTest = genDummyTest(0, 1, true) - local errSucTest = genDummyTest(1, 0, true) - local failSucTest = genDummyTest(1, 1, false) - local failSucErrTest = genDummyTest(1, 1, true) - - disableIoWrite() - - local success, msg = pcall(emptyTest.run, emptyTest) - tester:asserteq(success, true, "pcall should succeed for empty tests") - - local success, msg = pcall(sucTest.run, sucTest) - tester:asserteq(success, true, "pcall should succeed for 1 successful test") - - local success, msg = pcall(multSucTest.run, multSucTest) - tester:asserteq(success, true, - "pcall should succeed for 2+ successful tests") - - local success, msg = pcall(failTest.run, failTest) - tester:asserteq(success, false, "pcall should fail for tests with failure") - - local success, msg = pcall(errTest.run, errTest) - tester:asserteq(success, false, "pcall should fail for tests with error") - - local success, msg = pcall(errFailTest.run, errFailTest) - tester:asserteq(success, false, "pcall should fail for error+fail tests") - - local success, msg = pcall(errSucTest.run, errSucTest) - tester:asserteq(success, false, "pcall should fail for error+success tests") - - local success, msg = pcall(failSucTest.run, failSucTest) - tester:asserteq(success, false, "pcall should fail for fail+success tests") - - local success, msg = pcall(failSucErrTest.run, failSucErrTest) - tester:asserteq(success, false, - "pcall should fail for fail+success+err test") - - enableIoWrite() - - tester:asserteq(emptyTest.countasserts, 0, - "emptyTest should have 0 asserts") - tester:asserteq(sucTest.countasserts, 1, "sucTest should have 1 assert") - tester:asserteq(multSucTest.countasserts, 4, - "multSucTest should have 4 asserts") - tester:asserteq(failTest.countasserts, 1, "failTest should have 1 assert") - tester:asserteq(errTest.countasserts, 0, "errTest should have 0 asserts") - tester:asserteq(errFailTest.countasserts, 1, - "errFailTest should have 1 assert") - tester:asserteq(errSucTest.countasserts, 1, - "errSucTest should have 1 assert") - tester:asserteq(failSucTest.countasserts, 2, - "failSucTest should have 2 asserts") -end - -function tests.checkNestedTestsForbidden() - disableIoWrite() - - local myTester = torch.Tester() - local myTests = {{function() end}} - tester:assertErrorPattern(function() myTester:add(myTests) end, - "Nested sets", - "tester should forbid adding nested test sets") - - enableIoWrite() -end - -function tests.checkWarningOnAssertObject() - -- This test checks that calling assert with an object generates a warning - local myTester = torch.Tester() - local myTests = {} - function
myTests.assertAbuse() - myTester:assert({}) - end - myTester:add(myTests) - - local warningGiven = false - io.write = function(s) - if string.match(s, 'should only be used for boolean') then - warningGiven = true - end - end - - myTester:run() - enableIoWrite() - - tester:assert(warningGiven, "Should warn on calling :assert(object)") -end - -function tests.checkWarningOnAssertNeObject() - -- This test checks that calling assertne with two objects generates warning - local myTester = torch.Tester() - local myTests = {} - function myTests.assertAbuse() - myTester:assertne({}, {}) - end - myTester:add(myTests) - - local warningGiven = false - io.write = function(s) - if string.match(s, 'assertne should only be used to compare basic') then - warningGiven = true - end - end - - myTester:run() - enableIoWrite() - - tester:assert(warningGiven, "Should warn on calling :assertne(obj, obj)") -end - -function tests.checkWarningOnExtraAssertArguments() - -- This test checks that calling assert with extra args gives a lua error - local myTester = torch.Tester() - local myTests = {} - function myTests.assertAbuse() - myTester:assert(true, "some message", "extra argument") - end - myTester:add(myTests) - - local errorGiven = false - io.write = function(s) - if string.match(s, 'Unexpected arguments') then - errorGiven = true - end - end - tester:assertError(function() myTester:run() end) - enableIoWrite() - - tester:assert(errorGiven, ":assert should fail on extra arguments") -end - -function tests.checkWarningOnUsingTable() - -- Checks that if we don't use a TestSuite then gives a warning - local myTester = torch.Tester() - local myTests = {} - myTester:add(myTests) - - local errorGiven = false - io.write = function(s) - if string.match(s, 'use TestSuite rather than plain lua table') then - errorGiven = true - end - end - myTester:run() - - enableIoWrite() - tester:assert(errorGiven, "Using a plain lua table for testsuite should warn") -end - -function tests.checkMaxAllowedSetUpAndTearDown() - -- Checks can have at most 1 set-up and at most 1 tear-down function - local function f() end - local myTester = torch.Tester() - - for _, name in ipairs({'_setUp', '_tearDown'}) do - tester:assertNoError(function() myTester:add(f, name) end, - "Adding 1 set-up / tear-down should be fine") - tester:assertErrorPattern(function() myTester:add(f, name) end, - "Only one", - "Adding second set-up / tear-down should fail") - end -end - -function tests.test_setUp() - tester:asserteq(test_name_passed_to_setUp, 'test_setUp') - for key, value in pairs(tester.tests) do - tester:assertne(key, '_setUp') - end -end - -function tests.test_tearDown() - for key, value in pairs(tester.tests) do - tester:assertne(key, '_tearDown') - end -end - -function tests._setUp(name) - test_name_passed_to_setUp = name - calls_to_setUp = calls_to_setUp + 1 -end - -function tests._tearDown(name) - calls_to_tearDown = calls_to_tearDown + 1 -end - -tester:add(tests):run() - --- Additional tests to check that _setUp and _tearDown were called. -local test_count = 0 -for _ in pairs(tester.tests) do - test_count = test_count + 1 -end -local postTests = torch.TestSuite() -local postTester = torch.Tester() - -function postTests.test_setUp(tester) - postTester:asserteq(calls_to_setUp, test_count, - "Expected " .. test_count .. " calls to _setUp") -end - -function postTests.test_tearDown() - postTester:asserteq(calls_to_tearDown, test_count, - "Expected " .. test_count .. 
" calls to _tearDown") -end - -postTester:add(postTests):run() diff --git a/contrib/lua-torch/torch7/test/test_half.lua b/contrib/lua-torch/torch7/test/test_half.lua deleted file mode 100644 index bf3830b5ef..0000000000 --- a/contrib/lua-torch/torch7/test/test_half.lua +++ /dev/null @@ -1,55 +0,0 @@ -local mytester -local torchtest = torch.TestSuite() - --- Lua 5.2 compatibility -local loadstring = loadstring or load -local unpack = unpack or table.unpack - -function torchtest.easy() - local x=torch.randn(5, 6):half() - mytester:assert(x:isContiguous(), 'x should be contiguous') - mytester:assert(x:dim() == 2, 'x should have dim of 2') - mytester:assert(x:nDimension() == 2, 'x should have nDimension of 2') - mytester:assert(x:nElement() == 5 * 6, 'x should have 30 elements') - local stride = x:stride() - local expectedStride = torch.LongStorage{6,1} - for i=1,stride:size() do - mytester:assert(stride[i] == expectedStride[i], "stride is wrong") - end - - x=x:t() - mytester:assert(not x:isContiguous(), 'x transpose should not be contiguous') - x=x:transpose(1,2) - mytester:assert(x:isContiguous(), 'x should be contiguous after 2 transposes') - - local y=torch.HalfTensor() - y:resizeAs(x:t()):copy(x:t()) - mytester:assert(x:isContiguous(), 'after resize and copy, x should be contiguous') - mytester:assertTensorEq(y, x:t(), 0.001, 'copy broken after resizeAs') - local z=torch.HalfTensor() - z:resize(6, 5):copy(x:t()) - mytester:assertTensorEq(y, x:t(), 0.001, 'copy broken after resize') -end - -function torchtest.narrowSub() - local x = torch.randn(5, 6):half() - local narrow = x:narrow(1, 2, 3) - local sub = x:sub(2, 4) - mytester:assertTensorEq(narrow, sub, 0.001, 'narrow not equal to sub') -end - -function torchtest.selectClone() - local x = torch.zeros(5, 6) - x:select(1,2):fill(2) - x=x:half() - local y=x:clone() - mytester:assertTensorEq(x, y, 0.001, 'not equal after select and clone') - x:select(1,1):fill(3) - mytester:assert(y[1][1] == 0, 'clone broken') -end - -torch.setheaptracking(true) -math.randomseed(os.time()) -mytester = torch.Tester() -mytester:add(torchtest) -mytester:run(tests) diff --git a/contrib/lua-torch/torch7/test/test_qr.lua b/contrib/lua-torch/torch7/test/test_qr.lua deleted file mode 100644 index c850c3fe1a..0000000000 --- a/contrib/lua-torch/torch7/test/test_qr.lua +++ /dev/null @@ -1,274 +0,0 @@ --- This file contains tests for the QR decomposition functions in torch: --- torch.qr(), torch.geqrf() and torch.orgqr(). -local torch = require 'torch' -local tester = torch.Tester() -local tests = torch.TestSuite() - --- torch.qr() with result tensors given. -local function qrInPlace(tensorFunc) - return function(x) - local q, r = tensorFunc(), tensorFunc() - torch.qr(q, r, x:clone()) - return q, r - end -end - --- torch.qr() without result tensors given. -local function qrReturned(tensorFunc) - return function(x) - return torch.qr(x:clone()) - end -end - --- torch.geqrf() with result tensors given. -local function geqrfInPlace(tensorFunc) - return function(x) - local result = tensorFunc() - local tau = tensorFunc() - local result_, tau_ = torch.geqrf(result, tau, x) - assert(torch.pointer(result) == torch.pointer(result_), - 'expected result, result_ same tensor') - assert(torch.pointer(tau) == torch.pointer(tau_), - 'expected tau, tau_ same tensor') - return result_, tau_ - end -end - --- torch.orgqr() with result tensors given. 
-local function orgqrInPlace(tensorFunc) - return function(result, tau) - local q = tensorFunc() - local q_ = torch.orgqr(q, result, tau) - assert(torch.pointer(q) == torch.pointer(q_), 'expected q, q_ same tensor') - return q - end -end - --- Test a custom QR routine that calls the LAPACK functions manually. -local function qrManual(geqrfFunc, orgqrFunc) - return function(x) - local m = x:size(1) - local n = x:size(2) - local k = math.min(m, n) - local result, tau = geqrfFunc(x) - assert(result:size(1) == m) - assert(result:size(2) == n) - assert(tau:size(1) == k) - local r = torch.triu(result:narrow(1, 1, k)) - local q = orgqrFunc(result, tau) - return q:narrow(2, 1, k), r - end -end - --- Check that Q multiplied with a matrix with ormqr gives the correct result -local function checkQM(testOpts, mat1, mat2) - local q, r = torch.qr(mat1) - local m, tau = torch.geqrf(mat1) - local requiredPrecision = 1e-5 - tester:assertTensorEq(torch.mm(q, mat2), torch.ormqr(m, tau, mat2), - requiredPrecision) - tester:assertTensorEq(torch.mm(mat2, q), torch.ormqr(m, tau, mat2, 'R'), - requiredPrecision) - tester:assertTensorEq(torch.mm(q:t(), mat2), - torch.ormqr(m, tau, mat2, 'L', 'T'), requiredPrecision) - tester:assertTensorEq(torch.mm(mat2, q:t()), - torch.ormqr(m, tau, mat2, 'R', 'T'), requiredPrecision) -end - --- Check that the given `q`, `r` matrices are a valid QR decomposition of `a`. -local function checkQR(testOpts, a, q, r) - local qrFunc = testOpts.qr - if not q then - q, r = qrFunc(a) - end - local k = math.min(a:size(1), a:size(2)) - tester:asserteq(q:size(1), a:size(1), "Bad size for q first dimension.") - tester:asserteq(q:size(2), k, "Bad size for q second dimension.") - tester:asserteq(r:size(1), k, "Bad size for r first dimension.") - tester:asserteq(r:size(2), a:size(2), "Bad size for r second dimension.") - tester:assertTensorEq(q:t() * q, - torch.eye(q:size(2)):typeAs(testOpts.tensorFunc()), - testOpts.precision, - "Q was not orthogonal") - tester:assertTensorEq(r, r:triu(), testOpts.precision, - "R was not upper triangular") - tester:assertTensorEq(q * r, a, testOpts.precision, "QR = A") -end - --- Do a QR decomposition of `a` and check that the result is valid and matches --- the given expected `q` and `r`. -local function checkQRWithExpected(testOpts, a, expected_q, expected_r) - local qrFunc = testOpts.qr - -- Since the QR decomposition is unique only up to the signs of the rows of - -- R, we must ensure these are positive before doing the comparison. - local function canonicalize(q, r) - local d = r:diag():sign():diag() - return q * d, d * r - end - local q, r = qrFunc(a) - local q_canon, r_canon = canonicalize(q, r) - local expected_q_canon, expected_r_canon - = canonicalize(expected_q, expected_r) - tester:assertTensorEq(q_canon, expected_q_canon, testOpts.precision, - "Q did not match expected") - tester:assertTensorEq(r_canon, expected_r_canon, testOpts.precision, - "R did not match expected") - checkQR(testOpts, a, q, r) -end - --- Generate a separate test based on `func` for each of the possible --- combinations of tensor type (double or float) and QR function (torch.qr --- in-place, torch.qr, and manually calling the geqrf and orgqr from Lua --- (both in-place and not). --- --- The tests are added to the given `tests` table, with names generated by --- appending a unique string for the specific combination to `name`. --- --- If opts.doubleTensorOnly is true, then the FloatTensor versions of the test --- will be skipped. 
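-- Note that every generated variation pins the RNG and then restores the
-- caller's state, so the randomized checks are reproducible; the idiom used
-- in the generated closures is:
--   local state = torch.getRNGState()
--   torch.manualSeed(1)
--   func(testOpts) -- run the actual check
--   torch.setRNGState(state)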
-local function addTestVariations(tests, name, func, opts) - opts = opts or {} - local tensorTypes = { - [torch.DoubleTensor] = 1e-12, - [torch.FloatTensor] = 1e-5, - } - for tensorFunc, requiredPrecision in pairs(tensorTypes) do - local qrFuncs = { - ['inPlace'] = qrInPlace(tensorFunc), - ['returned'] = qrReturned(tensorFunc), - ['manualInPlace'] = qrManual(geqrfInPlace(tensorFunc), - orgqrInPlace(tensorFunc)), - ['manualReturned'] = qrManual(torch.geqrf, torch.orgqr) - } - for qrName, qrFunc in pairs(qrFuncs) do - local testOpts = { - tensorFunc=tensorFunc, - precision=requiredPrecision, - qr=qrFunc, - } - local tensorType = tensorFunc():type() - local fullName = name .. "_" .. qrName .. "_" .. tensorType - assert(not tests[fullName]) - if tensorType == 'torch.DoubleTensor' or not opts.doubleTensorOnly then - tests[fullName] = function() - local state = torch.getRNGState() - torch.manualSeed(1) - func(testOpts) - torch.setRNGState(state) - end - end - end - end -end - --- Decomposing a specific square matrix. -addTestVariations(tests, 'qrSquare', function(testOpts) - return function(testOpts) - local tensorFunc = testOpts.tensorFunc - local a = tensorFunc{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}} - local expected_q = tensorFunc{ - {-1.230914909793328e-01, 9.045340337332914e-01, - 4.082482904638621e-01}, - {-4.923659639173310e-01, 3.015113445777629e-01, - -8.164965809277264e-01}, - {-8.616404368553292e-01, -3.015113445777631e-01, - 4.082482904638634e-01}, - } - local expected_r = tensorFunc{ - {-8.124038404635959e+00, -9.601136296387955e+00, - -1.107823418813995e+01}, - { 0.000000000000000e+00, 9.045340337332926e-01, - 1.809068067466585e+00}, - { 0.000000000000000e+00, 0.000000000000000e+00, - -8.881784197001252e-16}, - } - checkQRWithExpected(testOpts, a, expected_q, expected_r) - end -end, {doubleTensorOnly=true}) - --- Decomposing a specific (wide) rectangular matrix. -addTestVariations(tests, 'qrRectFat', function(testOpts) - -- The matrix is chosen to be full-rank. - local a = testOpts.tensorFunc{ - {1, 2, 3, 4}, - {5, 6, 7, 8}, - {9, 10, 11, 13} - } - local expected_q = testOpts.tensorFunc{ - {-0.0966736489045663, 0.907737593658436 , 0.4082482904638653}, - {-0.4833682445228317, 0.3157348151855452, -0.8164965809277254}, - {-0.870062840141097 , -0.2762679632873518, 0.4082482904638621} - } - local expected_r = testOpts.tensorFunc{ - { -1.0344080432788603e+01, -1.1794185166357092e+01, - -1.3244289899925587e+01, -1.5564457473635180e+01}, - { 0.0000000000000000e+00, 9.4720444555662542e-01, - 1.8944088911132546e+00, 2.5653453733825331e+00}, - { 0.0000000000000000e+00, 0.0000000000000000e+00, - 1.5543122344752192e-15, 4.0824829046386757e-01} - } - checkQRWithExpected(testOpts, a, expected_q, expected_r) -end, {doubleTensorOnly=true}) - --- Decomposing a specific (thin) rectangular matrix. -addTestVariations(tests, 'qrRectThin', function(testOpts) - -- The matrix is chosen to be full-rank. 
- local a = testOpts.tensorFunc{ - { 1, 2, 3}, - { 4, 5, 6}, - { 7, 8, 9}, - {10, 11, 13}, - } - local expected_q = testOpts.tensorFunc{ - {-0.0776150525706334, -0.833052161400748 , 0.3651483716701106}, - {-0.3104602102825332, -0.4512365874254053, -0.1825741858350556}, - {-0.5433053679944331, -0.0694210134500621, -0.7302967433402217}, - {-0.7761505257063329, 0.3123945605252804, 0.5477225575051663} - } - local expected_r = testOpts.tensorFunc{ - {-12.8840987267251261, -14.5916298832790581, -17.0753115655393231}, - { 0, -1.0413152017509357, -1.770235842976589 }, - { 0, 0, 0.5477225575051664} - } - checkQRWithExpected(testOpts, a, expected_q, expected_r) -end, {doubleTensorOnly=true}) - --- Decomposing a sequence of medium-sized random matrices. -addTestVariations(tests, 'randomMediumQR', function(testOpts) - for x = 0, 10 do - for y = 0, 10 do - local m = math.pow(2, x) - local n = math.pow(2, y) - local x = torch.rand(m, n) - checkQR(testOpts, x:typeAs(testOpts.tensorFunc())) - end - end -end) - --- Decomposing a sequence of small random matrices. -addTestVariations(tests, 'randomSmallQR', function(testOpts) - for m = 1, 40 do - for n = 1, 40 do - checkQR(testOpts, torch.rand(m, n):typeAs(testOpts.tensorFunc())) - end - end -end) - --- Decomposing a sequence of small matrices that are not contiguous in memory. -addTestVariations(tests, 'randomNonContiguous', function(testOpts) - for m = 2, 40 do - for n = 2, 40 do - local x = torch.rand(m, n):t() - tester:assert(not x:isContiguous(), "x should not be contiguous") - checkQR(testOpts, x:typeAs(testOpts.tensorFunc())) - end - end -end) - -function tests.testQM() - checkQM({}, torch.randn(10, 10), torch.randn(10, 10)) - -- checkQM({}, torch.randn(20, 10), torch.randn(20, 20)) -end - -tester:add(tests) -tester:run() diff --git a/contrib/lua-torch/torch7/test/test_sharedmem.lua b/contrib/lua-torch/torch7/test/test_sharedmem.lua deleted file mode 100644 index 1230e59149..0000000000 --- a/contrib/lua-torch/torch7/test/test_sharedmem.lua +++ /dev/null @@ -1,92 +0,0 @@ -require 'torch' -local ffi = require 'ffi' - -local tester = torch.Tester() -local tests = torch.TestSuite() - -local function createSharedMemStorage(name, size, storageType) - local storageType = storageType or 'FloatStorage' - local shmName = name or os.tmpname():gsub('/','_') - local isShared = true - local isSharedMem = true - local nElements = size or torch.random(10000, 20000) - local storage = torch[storageType](shmName, isShared, nElements, isSharedMem) - return storage, shmName -end - -local function shmFilePath(shmName) - return (ffi.os ~= 'Windows' and '/dev/shm/' or '') .. 
shmName -end - -local function removeShmFile(shmFileName) - if ffi.os == 'Windows' then - os.remove(shmFileName) - end -end - -function tests.createSharedMemFile() - local storage, shmName = createSharedMemStorage() - local shmFileName = shmFilePath(shmName) - - -- check that file is at /dev/shm - tester:assert(paths.filep(shmFileName), - 'Shared memory file exists') - - -- collect storage and make sure that file is gone - storage = nil - collectgarbage() - collectgarbage() - removeShmFile(shmFileName) - tester:assert(not paths.filep(shmFileName), - 'Shared memory file does not exist') -end - -function tests.checkContents() - local storage, shmName = createSharedMemStorage() - local shmFileName = shmFilePath(shmName) - local tensor = torch.FloatTensor(storage, 1, torch.LongStorage{storage:size()}) - tensor:copy(torch.rand(storage:size())) - - local sharedFile = torch.DiskFile(shmFileName, 'r'):binary() - for i = 1, storage:size() do - tester:assert(sharedFile:readFloat() == storage[i], 'value is not correct') - end - sharedFile:close() - removeShmFile(shmFileName) -end - -function tests.testSharing() - -- since we are going to cast numbers into double (lua default) - -- we specifically generate double storage - local storage, shmName = createSharedMemStorage(nil, nil, 'DoubleStorage') - local shmFileName = shmFilePath(shmName) - local tensor = torch.DoubleTensor(storage, 1, torch.LongStorage{storage:size()}) - tensor:copy(torch.rand(storage:size())) - local tensorCopy = tensor.new():resizeAs(tensor):copy(tensor) - - -- access the same shared memory file as regular mapping from same process - local storage2 = torch.DoubleStorage(shmFileName, true, storage:size()) - local tensor2 = torch.DoubleTensor(storage2, 1, - torch.LongStorage{storage2:size()}) - local tensor2Copy = tensor2.new():resizeAs(tensor2):copy(tensor2) - - tester:assertTensorEq(tensorCopy, tensor2Copy, 0, 'contents don\'t match') - - -- fill tensor 1 with a random value and read from 2 - local rval = torch.uniform() - tensor:fill(rval) - for i = 1, tensor2:size(1) do - tester:asserteq(tensor2[i], rval, 'content is wrong') - end - - -- fill tensor 2 with a random value and read from 1 - local rval = torch.uniform() - tensor2:fill(rval) - for i = 1, tensor:size(1) do - tester:asserteq(tensor[i], rval, 'content is wrong') - end - removeShmFile(shmFileName) -end - -tester:add(tests) -tester:run() diff --git a/contrib/lua-torch/torch7/test/test_timer.lua b/contrib/lua-torch/torch7/test/test_timer.lua deleted file mode 100644 index ecf576a1ac..0000000000 --- a/contrib/lua-torch/torch7/test/test_timer.lua +++ /dev/null @@ -1,52 +0,0 @@ -require 'torch' -local ffi = require 'ffi' - -local tester = torch.Tester() -local tests = torch.TestSuite() - -function tests.timerTime() - local timer = torch.Timer() - - local function wait(seconds) - if ffi.os == 'Windows' then - os.execute(string.format('ping 127.0.0.1 -n %d > nul', seconds + 1)) - else - os.execute(string.format('sleep %d', seconds)) - end - end - - timer:reset() - wait(1) - local passed_time = timer:time().real - tester:assert(passed_time < 1.1, - ("Too long time passed: %.1f sec >= 1.1 sec"):format(passed_time)) - tester:assert(passed_time > 0.9, - ("Too short time passed: %.1f sec <= 0.9 sec"):format(passed_time)) - - timer:stop() - wait(1) - passed_time = timer:time().real - tester:assert(passed_time < 1.1, - ("Too long time passed: %.1f sec >= 1.1 sec"):format(passed_time)) - tester:assert(passed_time > 0.9, - ("Too short time passed: %.1f sec <= 0.9
sec"):format(passed_time)) - - timer:resume() - wait(1) - passed_time = timer:time().real - tester:assert(passed_time < 2.2, - ("Too long time passed: %.1f sec >= 2.2 sec"):format(passed_time)) - tester:assert(passed_time > 1.8, - ("Too short time passed: %.1f sec <= 1.8 sec"):format(passed_time)) - - timer:reset() - wait(1) - passed_time = timer:time().real - tester:assert(passed_time < 1.1, - ("Too long time passed: %.1f sec >= 1.1 sec"):format(passed_time)) - tester:assert(passed_time > 0.9, - ("Too short time passed: %.1f sec <= 0.9 sec"):format(passed_time)) -end - -tester:add(tests) -tester:run() diff --git a/contrib/lua-torch/torch7/test/test_writeObject.lua b/contrib/lua-torch/torch7/test/test_writeObject.lua deleted file mode 100644 index 52bcb71739..0000000000 --- a/contrib/lua-torch/torch7/test/test_writeObject.lua +++ /dev/null @@ -1,238 +0,0 @@ -require 'torch' - -local myTester = torch.Tester() - -local tests = torch.TestSuite() - -function torch.HalfTensor:norm() - return self:real():norm() -end - --- checks that an object can be written and unwritten --- returns false if an error occurs -local function serializeAndDeserialize(obj) - local file = torch.MemoryFile() - file:binary() - local ok, msg = pcall (file.writeObject, file, obj) - myTester:assert(ok, 'error in writing an object' ) - file:seek(1) - local ok, copy = pcall(file.readObject, file) - if not ok then print(copy) end - myTester:assert(ok, 'error in reading an object ') - return copy -end - -function tests.test_can_write_a_nil_closure() - local a - local function closure() - if not a then return 1 end - return 0 - end - - local copyClosure = serializeAndDeserialize(closure) - myTester:assert(copyClosure() == closure(), 'the closures should give same output') -end - -function tests.test_nil_upvalues_in_closure() - local a = 1 - local b - local c = 2 - local function closure() - if not b then return c end - return a - end - - local copyClosure = serializeAndDeserialize(closure) - myTester:assert(copyClosure() == closure(), 'the closures should give same output') -end - -function tests.test_global_function_in_closure() - local x = "5" - local function closure(str) - return tonumber(str .. x) - end - - local copyClosure = serializeAndDeserialize(closure) - myTester:assert(copyClosure("3") == closure("3"), 'the closures should give same output') -end - -function tests.test_a_recursive_closure() - local foo - - foo = function (level) - if level == 1 then return 1 end - return 1+foo(level-1) - end - - local copyFoo = serializeAndDeserialize(foo) - myTester:assert(copyFoo(42) == foo(42), 'the closures should give same output') -end - -function tests.test_a_tensor() - for k,v in ipairs({"real", "half"}) do - tests_test_a_tensor(torch.getmetatable(torch.Tensor():type())[v]) - end -end - -function tests_test_a_tensor(func) - local x = func(torch.rand(5, 10)) - local xcopy = serializeAndDeserialize(x) - myTester:assert(x:norm() == xcopy:norm(), 'tensors should be the same') -end - --- Regression test for bug reported in issue 456. 
-function tests.test_empty_table() - local file = torch.MemoryFile() - file:writeObject({}) -end - -function tests.test_error_msg() - local torch = torch - local inner = { - baz = function(a) torch.somefunc() end - } - local outer = { - theinner = inner - } - local function evil_func() - outer.prop = 1 - image.compress(1) - end - local ok, msg = pcall(torch.save, 'saved.t7', evil_func) - myTester:assert(not ok) - myTester:assert(msg:find('at <%?>%.outer%.theinner%.baz%.torch') ~= nil) -end - -function tests.test_warning_msg() - local foo = {} - torch.class('Bar', foo) - - local obj = foo.Bar() - local tensor = torch.Tensor() - obj.data = tensor:cdata() -- pick something NOT writable - - local file = torch.MemoryFile('rw'):binary() - local ok, _ = pcall(torch.File.writeObject, file, obj) - -- only a warning is printed on STDOUT: - -- $ Warning: cannot write object field of - myTester:assert(ok) - file:close() -end - -function tests.test_referenced() - local file = torch.MemoryFile('rw'):binary() - file:referenced(false) - - local foo = 'bar' - file:writeObject(foo) - file:close() -end - -function tests.test_shared_upvalues() - if debug.upvalueid then - local i=1 - local j=2 - - local func = {} - - func.increment = function() - i=i+1 - j=j+2 - end - func.get_i = function() - return i - end - func.get_j = function() - return j - end - - local copyFunc = serializeAndDeserialize(func) - myTester:assert(copyFunc.get_i()==1) - myTester:assert(copyFunc.get_j()==2) - copyFunc.increment() - myTester:assert(copyFunc.get_i()==2) - myTester:assert(copyFunc.get_j()==4) - else - print('Not running shared upvalues test, as we are in Lua-5.1') - end -end - - --- checks that the hook function works properly --- returns false if an error occurs -function tests.test_SerializationHook() - -- Simple UUID implementation from [https://gist.github.com/jrus/3197011] - -- The only goal is to avoid collisions within the scope of the tests, - -- so this is more than enough. - local random = math.random - local function uuid() - local template ='xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx' - return string.gsub(template, '[xy]', function (c) - local v = (c == 'x') and random(0, 0xf) or random(8, 0xb) - return string.format('%x', v) - end) - end - local unique1 = uuid() - local unique2 = uuid() - local class = {} - -- Create 2 classes - local spec = torch.class('class.'.. unique1, class) - function spec:test() - return false - end - local gen = torch.class('class.' ..
unique2, class) - function gen:test() - return true - end - local hook = function(object) - local class = class - local newObject = object - if torch.typename(object) == 'class.'..unique1 then - newObject = class[unique2]() - end - return newObject - end - - -- Write to 2 files, first without hooking, - -- second with hooking - local file = torch.MemoryFile('rw') - file:binary() - local file2 = torch.MemoryFile('rw') - file2:binary() - local s = class[unique1]() - local object = {s1 = s, v = 'test', g = class[unique2](), s2 = s} - file:writeObject(object) - file2:writeObject(object, nil, hook) - - -- unregister class[unique1] and try to reload the first serialized object - if debug and debug.getregistry then - local ok, res = pcall(function() classTestSerializationHook1 = nil debug.getregistry()[classTestSerializationHook1] = nil file:seek(1) return file:readObject() end) - myTester:assert(not ok) - else - print('Not running serialization hook failure test because debug is missing.') - end - - -- Try to reload the second serialized object - local ok, clone = pcall(function() file2:seek(1) return file2:readObject() end) - - -- Test that everything happened smoothly - myTester:assert(clone.v == 'test') - myTester:assert(torch.typename(clone.s1) == 'class.' .. unique2) - myTester:assert(clone.s1:test() and clone.s2:test()) - myTester:assert(string.format('%x',torch.pointer(clone.s1)) == string.format('%x',torch.pointer(clone.s2))) -end - -function tests.test_serializeToStorage() - torch.save("foo.t7", "foo") - local f = io.open("foo.t7", "rb") - local size = f:seek("end") - f:close() - myTester:eq( - torch.serializeToStorage("foo"):size(), size, - "memory and disk serializations should have the same size" - ) -end - -myTester:add(tests) -myTester:run() -if myTester.errors[1] then os.exit(1) end diff --git a/contrib/lua-torch/torch7/test/timeSort.lua b/contrib/lua-torch/torch7/test/timeSort.lua deleted file mode 100644 index ad513b8777..0000000000 --- a/contrib/lua-torch/torch7/test/timeSort.lua +++ /dev/null @@ -1,153 +0,0 @@ --- gnuplot.figure(2) --- Test torch sort, show it suffers from the problems of quicksort --- i.e. 
complexity O(N^2) in worst-case of sorted list -require 'gnuplot' -local ffi = require 'ffi' - -local cmd = torch.CmdLine() -cmd:option('-N', 10^7, 'Maximum array size') -cmd:option('-p', 50, 'Number of points in logspace') -cmd:option('-r', 20, 'Number of repetitions') - -local options = cmd:parse(arg or {}) -function main() - local log10 = math.log10 or function(x) return math.log(x, 10) end - local pow10 = torch.linspace(1,log10(options.N), options.p) - local num_sizes = options.p - local num_reps = options.r - - local old_rnd = torch.zeros(num_sizes, num_reps) - local old_srt = torch.zeros(num_sizes, num_reps) - local old_cst = torch.zeros(num_sizes, num_reps) - local new_rnd = torch.zeros(num_sizes, num_reps) - local new_srt = torch.zeros(num_sizes, num_reps) - local new_cst = torch.zeros(num_sizes, num_reps) - local ratio_rnd = torch.zeros(num_sizes, num_reps) - local ratio_srt = torch.zeros(num_sizes, num_reps) - local ratio_cst = torch.zeros(num_sizes, num_reps) - - -- Ascending sort uses new sort - local function time_sort(x) - collectgarbage() - local start = os.clock() - torch.sort(x,false) - return (os.clock()-start) - end - - -- Descending sort uses old sort - local function time_old_sort(x) - collectgarbage() - local start = os.clock() - torch.sort(x,true) - return (os.clock()-start) - end - - local benches = { - function(i,j,n) - -- on random - local input = torch.rand(n) - new_rnd[i][j] = time_sort(input:clone()) - old_rnd[i][j] = time_old_sort(input:clone()) - end, - - function(i,j,n) - -- on sorted - new_srt[i][j] = time_sort(torch.linspace(0,1,n)) - old_srt[i][j] = time_old_sort(torch.linspace(0,1,n):add(-1):mul(-1)) -- old_time is called on descending sort, hence the reversed input - end, - - function(i,j,n) - -- on constant - new_cst[i][j] = time_sort(torch.zeros(n)) - old_cst[i][j] = time_old_sort(torch.zeros(n)) - end - } - - local num_benches = #benches - local num_exps = num_sizes * num_benches * num_reps - - -- Full randomization - local perm = torch.randperm(num_exps):long() - local perm_benches = torch.Tensor(num_exps) - local perm_reps = torch.Tensor(num_exps) - local perm_sizes = torch.Tensor(num_exps) - - local l = 1 - for i=1, num_sizes do - for j=1, num_reps do - for k=1, num_benches do - perm_benches[ perm[l] ] = k - perm_reps[ perm[l] ] = j - perm_sizes[ perm[l] ] = i - l = l+1 - end - end - end - - local pc = 0 - for j = 1, num_exps do - local n = 10^pow10[perm_sizes[j]] - -- print(string.format('rep %d / %d, bench %d, size %d, rep %d\n', j, num_exps, perm_benches[j], n, perm_reps[j])) - if math.floor(100*j/num_exps) > pc then - pc = math.floor(100*j/num_exps) - io.write('.') - if pc % 10 == 0 then - io.write(' ' .. pc .. '%\n') - end - io.flush() - end - benches[perm_benches[j]](perm_sizes[j], perm_reps[j], n) - end - - ratio_rnd = torch.cdiv(old_rnd:mean(2), new_rnd:mean(2)) - ratio_srt = torch.cdiv(old_srt:mean(2), new_srt:mean(2)) - ratio_cst = torch.cdiv(old_cst:mean(2), new_cst:mean(2)) - - local N = pow10:clone():apply(function(x) return 10^x end) - - if ffi.os == 'Windows' then - gnuplot.setterm('windows') - else - gnuplot.setterm('x11') - end - gnuplot.figure(1) - gnuplot.raw('set log x; set mxtics 10') - gnuplot.raw('set grid mxtics mytics xtics ytics') - gnuplot.raw('set xrange [' .. N:min() .. ':' .. N:max() .. 
']' ) - gnuplot.plot({'Random - new', N, new_rnd:mean(2)}, - {'Sorted - new', N, new_srt:mean(2)}, - {'Constant - new', N, new_cst:mean(2)}, - {'Random - old', N, old_rnd:mean(2)}, - {'Sorted - old', N, old_srt:mean(2)}, - {'Constant - old', N, old_cst:mean(2)}) - gnuplot.xlabel('N') - gnuplot.ylabel('Time (s)') - gnuplot.figprint('benchmarkTime.png') - - gnuplot.figure(2) - gnuplot.raw('set log x; set mxtics 10') - gnuplot.raw('set grid mxtics mytics xtics ytics') - gnuplot.raw('set xrange [' .. N:min() .. ':' .. N:max() .. ']' ) - gnuplot.plot({'Random', N, ratio_rnd:mean(2)}, - {'Sorted', N, ratio_srt:mean(2)}, - {'Constant', N, ratio_cst:mean(2)}) - gnuplot.xlabel('N') - gnuplot.ylabel('Speed-up Factor (s)') - gnuplot.figprint('benchmarkRatio.png') - - torch.save('benchmark.t7', { - new_rnd=new_rnd, - new_srt=new_srt, - new_cst=new_cst, - old_rnd=old_rnd, - old_srt=old_srt, - old_cst=old_cst, - ratio_rnd=ratio_rnd, - ratio_srt=ratio_srt, - ratio_cst=ratio_cst, - pow10 = pow10, - num_reps = num_reps - }) -end - -main() diff --git a/contrib/lua-torch/torch7/torchcwrap.lua b/contrib/lua-torch/torch7/torchcwrap.lua deleted file mode 100644 index 551bd05d20..0000000000 --- a/contrib/lua-torch/torch7/torchcwrap.lua +++ /dev/null @@ -1,515 +0,0 @@ -local wrap = require 'cwrap' -local types = wrap.types - -types.Tensor = { - - helpname = function(arg) - if arg.dim then - return string.format("Tensor~%dD", arg.dim) - else - return "Tensor" - end - end, - - declare = function(arg) - local txt = {} - table.insert(txt, string.format("THTensor *arg%d = NULL;", arg.i)) - if arg.returned then - table.insert(txt, string.format("int arg%d_idx = 0;", arg.i)); - end - return table.concat(txt, '\n') - end, - - check = function(arg, idx) - if arg.dim then - return string.format("(arg%d = luaT_toudata(L, %d, torch_Tensor)) && (arg%d->nDimension == %d)", arg.i, idx, arg.i, arg.dim) - else - return string.format("(arg%d = luaT_toudata(L, %d, torch_Tensor))", arg.i, idx) - end - end, - - read = function(arg, idx) - if arg.returned then - return string.format("arg%d_idx = %d;", arg.i, idx) - end - end, - - init = function(arg) - if type(arg.default) == 'boolean' then - return string.format('arg%d = THTensor_(new)();', arg.i) - elseif type(arg.default) == 'number' then - return string.format('arg%d = %s;', arg.i, arg.args[arg.default]:carg()) - else - error('unknown default tensor type value') - end - end, - - carg = function(arg) - return string.format('arg%d', arg.i) - end, - - creturn = function(arg) - return string.format('arg%d', arg.i) - end, - - precall = function(arg) - local txt = {} - if arg.default and arg.returned then - table.insert(txt, string.format('if(arg%d_idx)', arg.i)) -- means it was passed as arg - table.insert(txt, string.format('lua_pushvalue(L, arg%d_idx);', arg.i)) - table.insert(txt, string.format('else')) - if type(arg.default) == 'boolean' then -- boolean: we did a new() - table.insert(txt, string.format('luaT_pushudata(L, arg%d, torch_Tensor);', arg.i)) - else -- otherwise: point on default tensor --> retain - table.insert(txt, string.format('{')) - table.insert(txt, string.format('THTensor_(retain)(arg%d);', arg.i)) -- so we need a retain - table.insert(txt, string.format('luaT_pushudata(L, arg%d, torch_Tensor);', arg.i)) - table.insert(txt, string.format('}')) - end - elseif arg.default then - -- we would have to deallocate the beast later if we did a new - -- unlikely anyways, so i do not support it for now - if type(arg.default) == 'boolean' then - error('a tensor cannot be 
optional if not returned') - end - elseif arg.returned then - table.insert(txt, string.format('lua_pushvalue(L, arg%d_idx);', arg.i)) - end - return table.concat(txt, '\n') - end, - - postcall = function(arg) - local txt = {} - if arg.creturned then - -- this next line is actually debatable - table.insert(txt, string.format('THTensor_(retain)(arg%d);', arg.i)) - table.insert(txt, string.format('luaT_pushudata(L, arg%d, torch_Tensor);', arg.i)) - end - return table.concat(txt, '\n') - end -} - -types.Generator = { - - helpname = function(arg) - return "Generator" - end, - - declare = function(arg) - return string.format("THGenerator *arg%d = NULL;", arg.i) - end, - - check = function(arg, idx) - return string.format("(arg%d = luaT_toudata(L, %d, torch_Generator))", arg.i, idx) - end, - - read = function(arg, idx) - end, - - init = function(arg) - local text = {} - -- If no generator is supplied, pull the default out of the torch namespace. - table.insert(text, 'lua_getglobal(L,"torch");') - table.insert(text, string.format('arg%d = luaT_getfieldcheckudata(L, -1, "_gen", torch_Generator);', arg.i)) - table.insert(text, 'lua_pop(L, 2);') - return table.concat(text, '\n') - end, - - carg = function(arg) - return string.format('arg%d', arg.i) - end, - - creturn = function(arg) - return string.format('arg%d', arg.i) - end, - - precall = function(arg) - end, - - postcall = function(arg) - end -} - -types.IndexTensor = { - - helpname = function(arg) - return "LongTensor" - end, - - declare = function(arg) - local txt = {} - table.insert(txt, string.format("THLongTensor *arg%d = NULL;", arg.i)) - if arg.returned then - table.insert(txt, string.format("int arg%d_idx = 0;", arg.i)); - end - return table.concat(txt, '\n') - end, - - check = function(arg, idx) - return string.format('(arg%d = luaT_toudata(L, %d, "torch.LongTensor"))', arg.i, idx) - end, - - read = function(arg, idx) - local txt = {} - if not arg.noreadadd then - table.insert(txt, string.format("THLongTensor_add(arg%d, arg%d, -1);", arg.i, arg.i)); - end - if arg.returned then - table.insert(txt, string.format("arg%d_idx = %d;", arg.i, idx)) - end - return table.concat(txt, '\n') - end, - - init = function(arg) - return string.format('arg%d = THLongTensor_new();', arg.i) - end, - - carg = function(arg) - return string.format('arg%d', arg.i) - end, - - creturn = function(arg) - return string.format('arg%d', arg.i) - end, - - precall = function(arg) - local txt = {} - if arg.default and arg.returned then - table.insert(txt, string.format('if(arg%d_idx)', arg.i)) -- means it was passed as arg - table.insert(txt, string.format('lua_pushvalue(L, arg%d_idx);', arg.i)) - table.insert(txt, string.format('else')) -- means we did a new() - table.insert(txt, string.format('luaT_pushudata(L, arg%d, "torch.LongTensor");', arg.i)) - elseif arg.default then - error('a tensor cannot be optional if not returned') - elseif arg.returned then - table.insert(txt, string.format('lua_pushvalue(L, arg%d_idx);', arg.i)) - end - return table.concat(txt, '\n') - end, - - postcall = function(arg) - local txt = {} - if arg.creturned or arg.returned then - table.insert(txt, string.format("THLongTensor_add(arg%d, arg%d, 1);", arg.i, arg.i)); - end - if arg.creturned then - -- this next line is actually debatable - table.insert(txt, string.format('THLongTensor_retain(arg%d);', arg.i)) - table.insert(txt, string.format('luaT_pushudata(L, arg%d, "torch.LongTensor");', arg.i)) - end - return table.concat(txt, '\n') - end -} - -for _,typename in ipairs({"ByteTensor", 
"CharTensor", "ShortTensor", "IntTensor", "LongTensor", - "FloatTensor", "HalfTensor", "DoubleTensor"}) do - - types[typename] = { - - helpname = function(arg) - if arg.dim then - return string.format('%s~%dD', typename, arg.dim) - else - return typename - end - end, - - declare = function(arg) - local txt = {} - table.insert(txt, string.format("TH%s *arg%d = NULL;", typename, arg.i)) - if arg.returned then - table.insert(txt, string.format("int arg%d_idx = 0;", arg.i)); - end - return table.concat(txt, '\n') - end, - - check = function(arg, idx) - if arg.dim then - return string.format('(arg%d = luaT_toudata(L, %d, "torch.%s")) && (arg%d->nDimension == %d)', arg.i, idx, typename, arg.i, arg.dim) - else - return string.format('(arg%d = luaT_toudata(L, %d, "torch.%s"))', arg.i, idx, typename) - end - end, - - read = function(arg, idx) - if arg.returned then - return string.format("arg%d_idx = %d;", arg.i, idx) - end - end, - - init = function(arg) - if type(arg.default) == 'boolean' then - return string.format('arg%d = TH%s_new();', arg.i, typename) - elseif type(arg.default) == 'number' then - return string.format('arg%d = %s;', arg.i, arg.args[arg.default]:carg()) - else - error('unknown default tensor type value') - end - end, - - carg = function(arg) - return string.format('arg%d', arg.i) - end, - - creturn = function(arg) - return string.format('arg%d', arg.i) - end, - - precall = function(arg) - local txt = {} - if arg.default and arg.returned then - table.insert(txt, string.format('if(arg%d_idx)', arg.i)) -- means it was passed as arg - table.insert(txt, string.format('lua_pushvalue(L, arg%d_idx);', arg.i)) - table.insert(txt, string.format('else')) - if type(arg.default) == 'boolean' then -- boolean: we did a new() - table.insert(txt, string.format('luaT_pushudata(L, arg%d, "torch.%s");', arg.i, typename)) - else -- otherwise: point on default tensor --> retain - table.insert(txt, string.format('{')) - table.insert(txt, string.format('TH%s_retain(arg%d);', typename, arg.i)) -- so we need a retain - table.insert(txt, string.format('luaT_pushudata(L, arg%d, "torch.%s");', arg.i, typename)) - table.insert(txt, string.format('}')) - end - elseif arg.default then - -- we would have to deallocate the beast later if we did a new - -- unlikely anyways, so i do not support it for now - if type(arg.default) == 'boolean' then - error('a tensor cannot be optional if not returned') - end - elseif arg.returned then - table.insert(txt, string.format('lua_pushvalue(L, arg%d_idx);', arg.i)) - end - return table.concat(txt, '\n') - end, - - postcall = function(arg) - local txt = {} - if arg.creturned then - -- this next line is actually debatable - table.insert(txt, string.format('TH%s_retain(arg%d);', typename, arg.i)) - table.insert(txt, string.format('luaT_pushudata(L, arg%d, "torch.%s");', arg.i, typename)) - end - return table.concat(txt, '\n') - end - } - - types[typename .. 'Array'] = { - - helpname = function(arg) - return string.format('{%s+}', typename) - end, - - declare = function(arg) - local txt = {} - table.insert(txt, string.format('TH%s **arg%d_data = NULL;', typename, arg.i)) - table.insert(txt, string.format('long arg%d_size = 0;', arg.i)) - table.insert(txt, string.format('int arg%d_i = 0;', arg.i)) - return table.concat(txt, '\n') - end, - - check = function(arg, idx) - return string.format('torch_isnonemptytable(L, %d)', idx) - end, - - read = function(arg, idx) - local txt = {} - -- Iterate over the array to find its length, leave elements on stack. 
- table.insert(txt, string.format('do')) - table.insert(txt, string.format('{')) - table.insert(txt, string.format(' arg%d_size++;', arg.i)) - table.insert(txt, string.format(' lua_checkstack(L, 1);')) - table.insert(txt, string.format(' lua_rawgeti(L, %d, arg%d_size);', idx, arg.i)) - table.insert(txt, string.format('}')) - table.insert(txt, string.format('while (!lua_isnil(L, -1));')) - table.insert(txt, string.format('arg%d_size--;', arg.i)) - -- Pop nil element from stack. - table.insert(txt, string.format('lua_pop(L, 1);')) - -- Allocate tensor pointers and read values from stack backwards. - table.insert(txt, string.format('arg%d_data = (TH%s**)THAlloc(arg%d_size * sizeof(TH%s*));', arg.i, typename, arg.i, typename)) - table.insert(txt, string.format('for (arg%d_i = arg%d_size - 1; arg%d_i >= 0; arg%d_i--)', arg.i, arg.i, arg.i, arg.i)) - table.insert(txt, string.format('{')) - table.insert(txt, string.format(' if (!(arg%d_data[arg%d_i] = luaT_toudata(L, -1, "torch.%s")))', arg.i, arg.i, typename)) - table.insert(txt, string.format(' luaL_error(L, "expected %s in tensor array");', typename)) - table.insert(txt, string.format(' lua_pop(L, 1);')) - table.insert(txt, string.format('}')) - table.insert(txt, string.format('')) - return table.concat(txt, '\n') - end, - - init = function(arg) - end, - - carg = function(arg) - return string.format('arg%d_data,arg%d_size', arg.i, arg.i) - end, - - creturn = function(arg) - error('TensorArray cannot be returned.') - end, - - precall = function(arg) - end, - - postcall = function(arg) - return string.format('THFree(arg%d_data);', arg.i) - end - } -end - -types.LongArg = { - - vararg = true, - - helpname = function(arg) - return "(LongStorage | dim1 [dim2...])" - end, - - declare = function(arg) - return string.format("THLongStorage *arg%d = NULL;", arg.i) - end, - - init = function(arg) - if arg.default then - error('LongArg cannot have a default value') - end - end, - - check = function(arg, idx) - return string.format("torch_islongargs(L, %d)", idx) - end, - - read = function(arg, idx) - return string.format("arg%d = torch_checklongargs(L, %d);", arg.i, idx) - end, - - carg = function(arg, idx) - return string.format('arg%d', arg.i) - end, - - creturn = function(arg, idx) - return string.format('arg%d', arg.i) - end, - - precall = function(arg) - local txt = {} - if arg.returned then - table.insert(txt, string.format('luaT_pushudata(L, arg%d, "torch.LongStorage");', arg.i)) - end - return table.concat(txt, '\n') - end, - - postcall = function(arg) - local txt = {} - if arg.creturned then - -- this next line is actually debatable - table.insert(txt, string.format('THLongStorage_retain(arg%d);', arg.i)) - table.insert(txt, string.format('luaT_pushudata(L, arg%d, "torch.LongStorage");', arg.i)) - end - if not arg.returned and not arg.creturned then - table.insert(txt, string.format('THLongStorage_free(arg%d);', arg.i)) - end - return table.concat(txt, '\n') - end -} - -types.charoption = { - - helpname = function(arg) - if arg.values then - return "(" .. table.concat(arg.values, '|') .. 
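The LongArg handler above, together with torch_checklongargs in utils.c further down, is what let tensor constructors accept either a single LongStorage or a trailing list of dimensions. A minimal Lua-side sketch, assuming a stock torch7 build (illustrative, not part of this diff):

   -- both forms resolve through the LongArg calling convention
   local a = torch.Tensor(4, 5)                     -- trailing numbers
   local b = torch.Tensor(torch.LongStorage{4, 5})  -- one LongStorage
   assert(a:size(1) == b:size(1) and a:size(2) == b:size(2))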
")" - end - end, - - declare = function(arg) - local txt = {} - table.insert(txt, string.format("const char *arg%d = NULL;", arg.i)) - if arg.default then - table.insert(txt, string.format("char arg%d_default = '%s';", arg.i, arg.default)) - end - return table.concat(txt, '\n') - end, - - init = function(arg) - return string.format("arg%d = &arg%d_default;", arg.i, arg.i) - end, - - check = function(arg, idx) - local txt = {} - local txtv = {} - table.insert(txt, string.format('(arg%d = lua_tostring(L, %d)) && (', arg.i, idx)) - for _,value in ipairs(arg.values) do - table.insert(txtv, string.format("*arg%d == '%s'", arg.i, value)) - end - table.insert(txt, table.concat(txtv, ' || ')) - table.insert(txt, ')') - return table.concat(txt, '') - end, - - read = function(arg, idx) - end, - - carg = function(arg, idx) - return string.format('arg%d', arg.i) - end, - - creturn = function(arg, idx) - end, - - precall = function(arg) - end, - - postcall = function(arg) - end -} - -for _,typename in ipairs({"ptrdiff_t", "size_t"}) do - types[typename] = { - - helpname = function(arg) - return typename - end, - - declare = function(arg) - -- if it is a number we initialize here - local default = tonumber(tostring(arg.default)) or 0 - return string.format("%s arg%d = %g;", typename, arg.i, default) - end, - - check = function(arg, idx) - return string.format("lua_isnumber(L, %d)", idx) - end, - - read = function(arg, idx) - return string.format("arg%d = (%s)lua_tonumber(L, %d);", arg.i, typename, idx) - end, - - init = function(arg) - -- otherwise do it here - if arg.default then - local default = tostring(arg.default) - if not tonumber(default) then - return string.format("arg%d = %s;", arg.i, default) - end - end - end, - - carg = function(arg) - return string.format('arg%d', arg.i) - end, - - creturn = function(arg) - return string.format('arg%d', arg.i) - end, - - precall = function(arg) - if arg.returned then - return string.format('lua_pushnumber(L, (lua_Number)arg%d);', arg.i) - end - end, - - postcall = function(arg) - if arg.creturned then - return string.format('lua_pushnumber(L, (lua_Number)arg%d);', arg.i) - end - end - } -end diff --git a/contrib/lua-torch/torch7/utils.c b/contrib/lua-torch/torch7/utils.c deleted file mode 100644 index 974d0ac08a..0000000000 --- a/contrib/lua-torch/torch7/utils.c +++ /dev/null @@ -1,255 +0,0 @@ -#include "general.h" -#include "utils.h" - -#ifdef WIN32 -# include -#else -# include -#endif - -THLongStorage* torch_checklongargs(lua_State *L, int index) -{ - THLongStorage *storage; - int i; - int narg = lua_gettop(L)-index+1; - - if(narg == 1 && luaT_toudata(L, index, "torch.LongStorage")) - { - THLongStorage *storagesrc = luaT_toudata(L, index, "torch.LongStorage"); - storage = THLongStorage_newWithSize(storagesrc->size); - THLongStorage_copy(storage, storagesrc); - } - else - { - storage = THLongStorage_newWithSize(narg); - for(i = index; i < index+narg; i++) - { - if(!lua_isnumber(L, i)) - { - THLongStorage_free(storage); - luaL_argerror(L, i, "number expected"); - } - THLongStorage_set(storage, i-index, lua_tonumber(L, i)); - } - } - return storage; -} - -int torch_islongargs(lua_State *L, int index) -{ - int narg = lua_gettop(L)-index+1; - - if(narg == 1 && luaT_toudata(L, index, "torch.LongStorage")) - { - return 1; - } - else - { - int i; - - for(i = index; i < index+narg; i++) - { - if(!lua_isnumber(L, i)) - return 0; - } - return 1; - } - return 0; -} - -#ifdef _WIN32 -#include -#include -static __declspec( thread ) LARGE_INTEGER ticksPerSecond = { 0 
diff --git a/contrib/lua-torch/torch7/utils.c b/contrib/lua-torch/torch7/utils.c
deleted file mode 100644
index 974d0ac08a..0000000000
--- a/contrib/lua-torch/torch7/utils.c
+++ /dev/null
@@ -1,255 +0,0 @@
-#include "general.h"
-#include "utils.h"
-
-#ifdef WIN32
-# include
-#else
-# include <sys/time.h>
-#endif
-
-THLongStorage* torch_checklongargs(lua_State *L, int index)
-{
-  THLongStorage *storage;
-  int i;
-  int narg = lua_gettop(L)-index+1;
-
-  if(narg == 1 && luaT_toudata(L, index, "torch.LongStorage"))
-  {
-    THLongStorage *storagesrc = luaT_toudata(L, index, "torch.LongStorage");
-    storage = THLongStorage_newWithSize(storagesrc->size);
-    THLongStorage_copy(storage, storagesrc);
-  }
-  else
-  {
-    storage = THLongStorage_newWithSize(narg);
-    for(i = index; i < index+narg; i++)
-    {
-      if(!lua_isnumber(L, i))
-      {
-        THLongStorage_free(storage);
-        luaL_argerror(L, i, "number expected");
-      }
-      THLongStorage_set(storage, i-index, lua_tonumber(L, i));
-    }
-  }
-  return storage;
-}
-
-int torch_islongargs(lua_State *L, int index)
-{
-  int narg = lua_gettop(L)-index+1;
-
-  if(narg == 1 && luaT_toudata(L, index, "torch.LongStorage"))
-  {
-    return 1;
-  }
-  else
-  {
-    int i;
-
-    for(i = index; i < index+narg; i++)
-    {
-      if(!lua_isnumber(L, i))
-        return 0;
-    }
-    return 1;
-  }
-  return 0;
-}
-
-#ifdef _WIN32
-#include <windows.h>
-#include <io.h>
-static __declspec( thread ) LARGE_INTEGER ticksPerSecond = { 0 };
-#endif
-
-static int torch_isatty(lua_State *L)
-{
-  FILE **fp = (FILE **) luaL_checkudata(L, -1, LUA_FILEHANDLE);
-#ifdef _WIN32
-  lua_pushboolean(L, _isatty(_fileno(*fp)));
-#else
-  lua_pushboolean(L, isatty(fileno(*fp)));
-#endif
-  return 1;
-}
-
-static double real_time(void)
-{
-#ifdef _WIN32
-  if (ticksPerSecond.QuadPart == 0)
-  {
-    QueryPerformanceFrequency(&ticksPerSecond);
-  }
-  LARGE_INTEGER current;
-  QueryPerformanceCounter(&current);
-  return (double)(current.QuadPart) / ticksPerSecond.QuadPart;
-#else
-  struct timeval current;
-  gettimeofday(&current, NULL);
-  return (current.tv_sec + current.tv_usec/1000000.0);
-#endif
-}
-
-static int torch_lua_tic(lua_State* L)
-{
-  double ttime = real_time();
-  lua_pushnumber(L,ttime);
-  return 1;
-}
-
-static int torch_lua_toc(lua_State* L)
-{
-  double toctime = real_time();
-  lua_Number tictime = luaL_checknumber(L,1);
-  lua_pushnumber(L,toctime-tictime);
-  return 1;
-}
-
-static int torch_lua_getdefaulttensortype(lua_State *L)
-{
-  const char* tname = torch_getdefaulttensortype(L);
-  if(tname)
-  {
-    lua_pushstring(L, tname);
-    return 1;
-  }
-  return 0;
-}
-
-const char* torch_getdefaulttensortype(lua_State *L)
-{
-  lua_getglobal(L, "torch");
-  if(lua_istable(L, -1))
-  {
-    lua_getfield(L, -1, "Tensor");
-    if(lua_istable(L, -1))
-    {
-      if(lua_getmetatable(L, -1))
-      {
-        lua_pushstring(L, "__index");
-        lua_rawget(L, -2);
-        if(lua_istable(L, -1))
-        {
-          lua_rawget(L, LUA_REGISTRYINDEX);
-          if(lua_isstring(L, -1))
-          {
-            const char *tname = lua_tostring(L, -1);
-            lua_pop(L, 4);
-            return tname;
-          }
-        }
-        else
-        {
-          lua_pop(L, 4);
-          return NULL;
-        }
-      }
-      else
-      {
-        lua_pop(L, 2);
-        return NULL;
-      }
-    }
-    else
-    {
-      lua_pop(L, 2);
-      return NULL;
-    }
-  }
-  else
-  {
-    lua_pop(L, 1);
-    return NULL;
-  }
-  return NULL;
-}
-
-static int torch_getnumthreads(lua_State *L)
-{
-  lua_pushinteger(L, THGetNumThreads());
-  return 1;
-}
-
-static int torch_setnumthreads(lua_State *L)
-{
-  THSetNumThreads(luaL_checkint(L, 1));
-  return 0;
-}
-
-static int torch_getnumcores(lua_State *L)
-{
-  lua_pushinteger(L, THGetNumCores());
-  return 1;
-}
-
-static void luaTorchGCFunction(void *data)
-{
-  lua_State *L = data;
-  lua_gc(L, LUA_GCCOLLECT, 0);
-}
-
-static int torch_setheaptracking(lua_State *L)
-{
-  int enabled = luaT_checkboolean(L,1);
-  lua_getglobal(L, "torch");
-  lua_pushboolean(L, enabled);
-  lua_setfield(L, -2, "_heaptracking");
-  if(enabled) {
-    THSetGCHandler(luaTorchGCFunction, L);
-  } else {
-    THSetGCHandler(NULL, NULL);
-  }
-  return 0;
-}
-
-static void luaTorchErrorHandlerFunction(const char *msg, void *data)
-{
-  lua_State *L = data;
-  luaL_error(L, msg);
-}
-
-static void luaTorchArgErrorHandlerFunction(int argNumber, const char *msg, void *data)
-{
-  lua_State *L = data;
-  luaL_argcheck(L, 0, argNumber, msg);
-}
-
-static int torch_updateerrorhandlers(lua_State *L)
-{
-  THSetErrorHandler(luaTorchErrorHandlerFunction, L);
-  THSetArgErrorHandler(luaTorchArgErrorHandlerFunction, L);
-  return 0;
-}
-
-static const struct luaL_Reg torch_utils__ [] = {
-  {"getdefaulttensortype", torch_lua_getdefaulttensortype},
-  {"isatty", torch_isatty},
-  {"tic", torch_lua_tic},
-  {"toc", torch_lua_toc},
-  {"setnumthreads", torch_setnumthreads},
-  {"getnumthreads", torch_getnumthreads},
-  {"getnumcores", torch_getnumcores},
-  {"factory", luaT_lua_factory},
-  {"getconstructortable", luaT_lua_getconstructortable},
-  {"typename", luaT_lua_typename},
-  {"isequal", luaT_lua_isequal},
-  {"getenv", luaT_lua_getenv},
-  {"setenv", luaT_lua_setenv},
-  {"newmetatable", luaT_lua_newmetatable},
-  {"setmetatable", luaT_lua_setmetatable},
-  {"getmetatable", luaT_lua_getmetatable},
-  {"metatype", luaT_lua_metatype},
-  {"pushudata", luaT_lua_pushudata},
-  {"version", luaT_lua_version},
-  {"pointer", luaT_lua_pointer},
-  {"setheaptracking", torch_setheaptracking},
-  {"updateerrorhandlers", torch_updateerrorhandlers},
-  {NULL, NULL}
-};
-
-void torch_utils_init(lua_State *L)
-{
-  torch_updateerrorhandlers(L);
-  luaT_setfuncs(L, torch_utils__, 0);
-}
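The tic/toc pair registered above exposes real_time() to Lua for coarse wall-clock timing. A minimal usage sketch against the stock torch7 API (illustrative only):

   local t0 = torch.tic()            -- wall-clock timestamp in seconds
   local x = torch.randn(1000, 1000)
   local y = x * x                   -- some work to time
   print('elapsed: ' .. torch.toc(t0) .. 's')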
diff --git a/contrib/lua-torch/torch7/utils.h b/contrib/lua-torch/torch7/utils.h
deleted file mode 100644
index f9654d4b7e..0000000000
--- a/contrib/lua-torch/torch7/utils.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef TORCH_UTILS_INC
-#define TORCH_UTILS_INC
-
-#include "luaT.h"
-#include "TH.h"
-
-#include
-#include
-
-#ifdef _WIN32
-#else
-#include <unistd.h>
-#endif
-
-#ifdef __cplusplus
-# define TORCH_EXTERNC extern "C"
-#else
-# define TORCH_EXTERNC extern
-#endif
-
-#ifdef _WIN32
-# ifdef torch_EXPORTS
-#  define TORCH_API TORCH_EXTERNC __declspec(dllexport)
-# else
-#  define TORCH_API TORCH_EXTERNC __declspec(dllimport)
-# endif
-#else
-# define TORCH_API TORCH_EXTERNC
-#endif
-
-
-TORCH_API THLongStorage* torch_checklongargs(lua_State *L, int index);
-TORCH_API int torch_islongargs(lua_State *L, int index);
-TORCH_API const char* torch_getdefaulttensortype(lua_State *L);
-
-#endif