Skip to content

Commit

Permalink
Merge pull request #2 from ButterflyNetwork/py3
Browse files Browse the repository at this point in the history
Port to Python3 and fix scripts.
  • Loading branch information
cmars authored Jun 27, 2017
2 parents 5f80f8a + e593f53 commit 46cb166
Show file tree
Hide file tree
Showing 8 changed files with 148 additions and 172 deletions.
59 changes: 29 additions & 30 deletions pystdf/IO.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,12 @@
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Expand Down Expand Up @@ -52,7 +52,7 @@ def readAndUnpack(self, header, fmt):
header.len -= len(buf)
val,=struct.unpack(self.endian + fmt, buf)
return val

def readAndUnpackDirect(self, fmt):
size = struct.calcsize(fmt)
buf = self.inp.read(size)
Expand All @@ -61,13 +61,13 @@ def readAndUnpackDirect(self, fmt):
raise EofException()
val,=struct.unpack(self.endian + fmt, buf)
return val

def readField(self, header, stdfFmt):
    """Read one scalar field of STDF type `stdfFmt` from inside a record,
    decrementing the remaining record length tracked by `header`."""
    fmt = packFormatMap[stdfFmt]
    return self.readAndUnpack(header, fmt)

def readFieldDirect(self, stdfFmt):
    """Read one scalar field of STDF type `stdfFmt` straight from the input
    stream, outside of any record-length bookkeeping."""
    fmt = packFormatMap[stdfFmt]
    return self.readAndUnpackDirect(fmt)

def readCn(self, header):
if header.len == 0:
raise EndOfRecordException()
Expand All @@ -85,49 +85,49 @@ def readCn(self, header):
header.len -= len(buf)
val,=struct.unpack(str(slen) + "s", buf)
return val

def readBn(self, header):
    """Read a variable-length byte field (STDF B*n): a U1 byte count
    followed by that many B1 bytes.

    Returns the bytes as a list.

    BUG FIX: the loop body called bare ``readField(...)``, which is a
    NameError at runtime; it must be the instance method
    ``self.readField(...)`` (consistent with readDn/readVn below).
    """
    blen = self.readField(header, "U1")
    bn = []
    for _ in range(blen):
        bn.append(self.readField(header, "B1"))
    return bn

def readDn(self, header):
    """Read a variable-length bit field (STDF D*n): a U2 bit count
    followed by ceil(bits / 8) data bytes.

    Returns the data bytes as a list.

    FIX: the scraped diff left both the Py2 (`range(0, dlen)` with float
    dlen) and Py3 (`range(0, int(dlen))`) loop lines in place; this body
    resolves to integer arithmetic directly.
    """
    dbitlen = self.readField(header, "U2")
    # Number of whole bytes needed to hold dbitlen bits (ceiling division).
    dlen = (dbitlen + 7) // 8
    dn = []
    for _ in range(dlen):
        dn.append(self.readField(header, "B1"))
    return dn

def readVn(self, header):
    """Read a variable-type field list (STDF V*n): a U2 element count, then
    per element a B1 type code followed by a value parsed via ``self.vnMap``.

    Type codes not present in ``vnMap`` are silently skipped (their value
    bytes are not consumed here, matching the original behavior).

    FIX: the scraped diff left both the Py2 ``vnMap.has_key(fldtype)`` and
    the Py3 ``fldtype in self.vnMap`` lines in place; only the Py3 membership
    test belongs.
    """
    vlen = self.readField(header, "U2")
    vn = []
    for _ in range(vlen):
        fldtype = self.readField(header, "B1")
        if fldtype in self.vnMap:
            vn.append(self.vnMap[fldtype](header))
    return vn

def readArray(self, header, indexValue, stdfFmt):
    """Read ``indexValue`` elements of STDF type ``stdfFmt`` and return them
    as a list.

    N1 nibble arrays are stored packed two-per-byte, so they are read as
    ceil(indexValue / 2) U1 bytes.

    BUG FIX: the original N1 branch discarded the recursive call's result and
    fell through to a bare ``return`` (returning None); it now returns the
    array. Also resolves the duplicated Py2/Py3 loop lines left by the diff
    scrape to the Py3 form (``range(int(indexValue))``).
    """
    if stdfFmt == 'N1':
        # Two nibbles per byte: ceiling division for the packed byte count.
        return self.readArray(header, indexValue // 2 + indexValue % 2, 'U1')
    arr = []
    for _ in range(int(indexValue)):
        arr.append(self.unpackMap[stdfFmt](header, stdfFmt))
    return arr

def readHeader(self):
    """Read the 4-byte STDF record header: U2 record length, then U1 record
    type and U1 record subtype. Returns a populated RecordHeader."""
    header = RecordHeader()
    header.len = self.readFieldDirect("U2")
    header.typ, header.sub = (
        self.readFieldDirect("U1"),
        self.readFieldDirect("U1"),
    )
    return header

def __detectEndian(self):
self.eof = 0
header = self.readHeader()
Expand All @@ -142,9 +142,9 @@ def __detectEndian(self):
return '<'
else:
return '>'

def header(self, header):
    """Per-record-header hook; the default implementation does nothing.

    parse_records() invokes this for every header read, so sinks or
    subclasses can override it to observe the raw headers.
    """
    return None

def parse_records(self, count=0):
i = 0
self.eof = 0
Expand All @@ -153,7 +153,7 @@ def parse_records(self, count=0):
while self.eof==0:
header = self.readHeader()
self.header(header)
if (self.recordMap.has_key((header.typ, header.sub))):
if (header.typ, header.sub) in self.recordMap:
recType = self.recordMap[(header.typ, header.sub)]
recParser = self.recordParsers[(header.typ, header.sub)]
fields = recParser(self, header, [])
Expand All @@ -166,49 +166,49 @@ def parse_records(self, count=0):
i += 1
if i >= count: break
except EofException: pass

def auto_detect_endian(self):
    # Auto-detect the stream's byte order, but only when positioned at the
    # very start of the input; otherwise keep the already-configured endian.
    #
    # NOTE(review): the scraped source lost indentation -- verify against
    # upstream whether the __detectEndian() call sits inside this guard or
    # runs unconditionally.
    if self.inp.tell() == 0:
        # Start from native byte order ('@'), then refine it by inspecting
        # the stream contents.
        self.endian = '@'
        self.endian = self.__detectEndian()

def parse(self, count=0):
    """Parse up to ``count`` records from the input (0 = all records).

    Drives the begin/complete sink lifecycle; on any failure the sinks are
    notified via ``cancel(exception)`` and the exception is re-raised.

    FIX: the scraped diff left both the Py2 ``except Exception, exception:``
    and the Py3 ``except Exception as exception:`` lines in place; only the
    Py3 form is valid syntax.
    """
    self.begin()
    try:
        self.auto_detect_endian()
        self.parse_records(count)
        self.complete()
    except Exception as exception:
        # Give sinks a chance to clean up, then propagate the failure.
        self.cancel(exception)
        raise

def getFieldParser(self, fieldType):
    """Build a parser callable for a single STDF field type.

    Array types ("k<idx><Fmt>") produce a parser that reads a
    variable-length array whose element count comes from the
    already-parsed field at position <idx>; scalar types dispatch
    through ``unpackMap``.
    """
    if not fieldType.startswith("k"):
        unpack = self.unpackMap[fieldType]
        return lambda self, header, fields: unpack(header, fieldType)
    countIdx, elemFmt = re.match(r'k(\d+)([A-Z][a-z0-9]+)', fieldType).groups()
    index = int(countIdx)
    return lambda self, header, fields: self.readArray(header, fields[index], elemFmt)

def createRecordParser(self, recType):
    """Compose per-field parsers into a single parser for ``recType``.

    Starts from an identity parser (returns the accumulated field list
    unchanged) and chains one field parser per STDF type declared by the
    record, via ``appendFieldParser``.
    """
    def identity(self, header, fields):
        return fields
    parser = identity
    for fieldFmt in recType.fieldStdfTypes:
        parser = appendFieldParser(parser, self.getFieldParser(fieldFmt))
    return parser

def __init__(self, recTypes=V4.records, inp=sys.stdin, reopen_fn=None, endian=None):
DataSource.__init__(self, ['header']);
self.eof = 1
self.recTypes = set(recTypes)
self.inp = inp
self.reopen_fn = reopen_fn
self.endian = endian

self.recordMap = dict(
[ ( (recType.typ, recType.sub), recType )
for recType in recTypes ])

self.unpackMap = {
"C1": self.readField,
"B1": self.readField,
Expand All @@ -227,11 +227,11 @@ def __init__(self, recTypes=V4.records, inp=sys.stdin, reopen_fn=None, endian=No
"Dn": lambda header, fmt: self.readDn(header),
"Vn": lambda header, fmt: self.readVn(header)
}

self.recordParsers = dict(
[ ( (recType.typ, recType.sub), self.createRecordParser(recType) )
for recType in recTypes ])

self.vnMap = {
0: lambda header: self.inp.read(header, 1),
1: lambda header: self.readField(header, "U1"),
Expand All @@ -247,4 +247,3 @@ def __init__(self, recTypes=V4.records, inp=sys.stdin, reopen_fn=None, endian=No
12: lambda header: self.readDn(header),
13: lambda header: self.readField(header, "U1")
}

23 changes: 11 additions & 12 deletions pystdf/Mapping.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,12 @@
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Expand All @@ -22,30 +22,30 @@
from pystdf import V4

class StreamMapper(StreamIndexer):
    """Indexer that records, for every record seen, its byte offset in the
    stream (``indexes``) and its record type (``types``)."""

    def __init__(self, types=V4.records):
        # Parallel lists, appended to once per record header.
        self.indexes = []
        self.types = []
        # (typ, sub) -> record type, for fast lookup per header.
        self.__rec_map = {(recType.typ, recType.sub): recType for recType in types}

    def before_header(self, dataSource, header):
        StreamIndexer.before_header(self, dataSource, header)
        self.indexes.append(self.position)
        key = (self.header.typ, self.header.sub)
        self.types.append(self.__rec_map.get(key, UnknownRecord(*key)))

class MaterialMapper(MaterialIndexer):
indexable_types = set([V4.wir, V4.wrr, V4.pir, V4.prr, V4.ptr, V4.mpr, V4.ftr])
per_part_types = set([V4.pir, V4.prr, V4.ptr, V4.mpr, V4.ftr])

def before_begin(self, dataSource):
MaterialIndexer.before_begin(self, dataSource)
self.waferid = []
self.insertionid = []
self.partid = []

def before_send(self, dataSource, data):
MaterialIndexer.before_send(self, dataSource, data)
rectype, rec = data
Expand All @@ -62,20 +62,19 @@ def before_send(self, dataSource, data):
self.waferid.append(None)
self.insertionid.append(None)
self.partid.append(None)

if __name__ == '__main__':
    # CLI: dump the stream offset and record type of every record in an
    # STDF file given as the single command-line argument.
    from pystdf.IO import Parser

    filename, = sys.argv[1:]
    record_mapper = StreamMapper()
    # FIX: the scraped diff left both the Py2 `print index, rectype` and the
    # Py3 `print(index, rectype)` lines in place; only the Py3 call remains.
    # Also use a context manager so the file is closed even if parsing
    # raises (the original leaked the handle on error), and drop the unused
    # AtdfWriter / pystdf.V4 imports.
    with open(filename, 'rb') as f:
        p = Parser(inp=f)
        p.addSink(record_mapper)
        p.parse()

    for index, rectype in zip(record_mapper.indexes, record_mapper.types):
        print(index, rectype)
Loading

0 comments on commit 46cb166

Please sign in to comment.