Skip to content

Commit

Permalink
Update pyarchivefile.py
Browse files (browse the repository at this point in the history)
  • Loading branch information
KazukiPrzyborowski committed Feb 11, 2025
1 parent 27b2d4c commit 9b1736d
Showing 1 changed file with 45 additions and 29 deletions.
74 changes: 45 additions & 29 deletions pyarchivefile.py
Original file line number Diff line number Diff line change
Expand Up @@ -2085,7 +2085,7 @@ def ReadFileHeaderDataWithContent(fp, listonly=False, uncompress=True, skipcheck
fcompression = HeaderOut[12]
fcsize = int(HeaderOut[13], 16)
fseeknextfile = HeaderOut[25]
fjsonsize = int(HeaderOut[27], 16)
fjsonsize = int(HeaderOut[29], 16)
fjsoncontent = {}
fprejsoncontent = fp.read(fjsonsize).decode("UTF-8")
if(fjsonsize > 0):
Expand Down Expand Up @@ -2207,11 +2207,13 @@ def ReadFileHeaderDataWithContentToArray(fp, listonly=False, contentasfile=True,
fdev_minor = int(HeaderOut[24], 16)
fdev_major = int(HeaderOut[25], 16)
fseeknextfile = HeaderOut[26]
fjsonsize = int(HeaderOut[27], 16)
fextrasize = int(HeaderOut[28], 16)
fextrafields = int(HeaderOut[29], 16)
fjsontype = HeaderOut[27]
fjsonlen = int(HeaderOut[28], 16)
fjsonsize = int(HeaderOut[29], 16)
fextrasize = int(HeaderOut[30], 16)
fextrafields = int(HeaderOut[31], 16)
fextrafieldslist = []
extrastart = 30
extrastart = 32
extraend = extrastart + fextrafields
while(extrastart < extraend):
fextrafieldslist.append(HeaderOut[extrastart])
Expand Down Expand Up @@ -2309,7 +2311,7 @@ def ReadFileHeaderDataWithContentToArray(fp, listonly=False, contentasfile=True,
if(not contentasfile):
fcontents = fcontents.read()
outlist = {'fheadersize': fheadsize, 'fhstart': fheaderstart, 'fhend': fhend, 'ftype': ftype, 'fencoding': fencoding, 'fcencoding': fcencoding, 'fname': fname, 'fbasedir': fbasedir, 'flinkname': flinkname, 'fsize': fsize, 'fatime': fatime, 'fmtime': fmtime, 'fctime': fctime, 'fbtime': fbtime, 'fmode': fmode, 'fchmode': fchmode, 'ftypemod': ftypemod, 'fwinattributes': fwinattributes, 'fcompression': fcompression, 'fcsize': fcsize, 'fuid': fuid, 'funame': funame, 'fgid': fgid, 'fgname': fgname, 'finode': finode, 'flinkcount': flinkcount,
'fdev': fdev, 'fminor': fdev_minor, 'fmajor': fdev_major, 'fseeknextfile': fseeknextfile, 'fheaderchecksumtype': HeaderOut[-4], 'fcontentchecksumtype': HeaderOut[-3], 'fnumfields': fnumfields + 2, 'frawheader': HeaderOut, 'fextrafields': fextrafields, 'fextrafieldsize': fextrasize, 'fextradata': fextrafieldslist, 'fjsondata': fjsoncontent, 'fheaderchecksum': fcs, 'fcontentchecksum': fccs, 'fhascontents': pyhascontents, 'fcontentstart': fcontentstart, 'fcontentend': fcontentend, 'fcontentasfile': contentasfile, 'fcontents': fcontents}
'fdev': fdev, 'fminor': fdev_minor, 'fmajor': fdev_major, 'fseeknextfile': fseeknextfile, 'fheaderchecksumtype': HeaderOut[-4], 'fcontentchecksumtype': HeaderOut[-3], 'fnumfields': fnumfields + 2, 'frawheader': HeaderOut, 'fextrafields': fextrafields, 'fextrafieldsize': fextrasize, 'fextradata': fextrafieldslist, 'fjsontype': fjsontype, 'fjsonlen': fjsonlen, 'fjsonsize': fjsonsize, 'fjsondata': fjsoncontent, 'fheaderchecksum': fcs, 'fcontentchecksum': fccs, 'fhascontents': pyhascontents, 'fcontentstart': fcontentstart, 'fcontentend': fcontentend, 'fcontentasfile': contentasfile, 'fcontents': fcontents}
return outlist


Expand Down Expand Up @@ -2357,11 +2359,13 @@ def ReadFileHeaderDataWithContentToList(fp, listonly=False, contentasfile=False,
fdev_minor = int(HeaderOut[24], 16)
fdev_major = int(HeaderOut[25], 16)
fseeknextfile = HeaderOut[26]
fjsonsize = int(HeaderOut[27], 16)
fextrasize = int(HeaderOut[28], 16)
fextrafields = int(HeaderOut[29], 16)
fjsontype = HeaderOut[27]
fjsonlen = int(HeaderOut[28], 16)
fjsonsize = int(HeaderOut[29], 16)
fextrasize = int(HeaderOut[30], 16)
fextrafields = int(HeaderOut[31], 16)
fextrafieldslist = []
extrastart = 30
extrastart = 32
extraend = extrastart + fextrafields
while(extrastart < extraend):
fextrafieldslist.append(HeaderOut[extrastart])
Expand Down Expand Up @@ -3380,9 +3384,10 @@ def AppendFileHeaderWithContent(fp, filevalues=[], extradata=[], jsondata={}, fi
extrasizestr = extrasizestr + \
AppendNullBytes(extradata, formatspecs['format_delimiter'])
extrasizelen = format(len(extrasizestr), 'x').lower()
tmpoutlen = len(filevalues) + len(extradata) + 7
tmpoutlen = len(filevalues) + len(extradata) + 9
tmpoutlenhex = format(tmpoutlen, 'x').lower()
tmpoutlist = filevalues
fjsontype = "json"
if(len(jsondata) > 0):
try:
fjsoncontent = base64.b64encode(json.dumps(jsondata, separators=(',', ':')).encode("UTF-8"))
Expand All @@ -3394,7 +3399,10 @@ def AppendFileHeaderWithContent(fp, filevalues=[], extradata=[], jsondata={}, fi
else:
fjsoncontent = "".encode("UTF-8")
fjsonsize = format(len(fjsoncontent), 'x').lower()
fjsonlen = format(len(jsondata), 'x').lower()
tmpoutlist.insert(0, tmpoutlenhex)
tmpoutlist.append(fjsontype)
tmpoutlist.append(fjsonlen)
tmpoutlist.append(fjsonsize)
tmpoutlist.append(extrasizelen)
tmpoutlist.append(extrafields)
Expand Down Expand Up @@ -7837,14 +7845,16 @@ def ArchiveFileSeekToFileName(infile, fmttype="auto", seekfile=None, listonly=Fa
prefcompression = preheaderdata[14]
prefcsize = int(preheaderdata[15], 16)
prefseeknextfile = preheaderdata[26]
prefjsonsize = int(HeaderOut[27], 16)
prefjsontype = preheaderdata[27]
prefjsonlen = int(preheaderdata[28], 16)
prefjsonsize = int(preheaderdata[29], 16)
prefjoutfprejsoncontent = fp.read(prefjsonsize).decode("UTF-8")
if(prefjsonsize <= 0):
prefjoutfprejsoncontent = "".encode()
fp.seek(len(formatspecs['format_delimiter']), 1)
prefextrasize = int(preheaderdata[28], 16)
prefextrafields = int(preheaderdata[29], 16)
extrastart = 30
prefextrasize = int(preheaderdata[30], 16)
prefextrafields = int(preheaderdata[31], 16)
extrastart = 32
extraend = extrastart + prefextrafields
prefcs = preheaderdata[-2].lower()
prenewfcs = preheaderdata[-1].lower()
Expand Down Expand Up @@ -8149,7 +8159,9 @@ def ArchiveFileValidate(infile, fmttype="auto", formatspecs=__file_format_multi_
outfdev_minor = int(inheaderdata[24], 16)
outfdev_major = int(inheaderdata[25], 16)
outfseeknextfile = inheaderdata[26]
outfjsonsize = int(inheaderdata[27], 16)
outfjsontype = inheaderdata[27]
outfjsonlen = int(inheaderdata[28], 16)
outfjsonsize = int(inheaderdata[29], 16)
outfjsoncontent = {}
outfprejsoncontent = fp.read(outfjsonsize).decode("UTF-8")
if(outfjsonsize > 0):
Expand All @@ -8165,10 +8177,10 @@ def ArchiveFileValidate(infile, fmttype="auto", formatspecs=__file_format_multi_
outfprejsoncontent = ""
outfjsoncontent = {}
fp.seek(len(formatspecs['format_delimiter']), 1)
outfextrasize = int(inheaderdata[28], 16)
outfextrafields = int(inheaderdata[29], 16)
outfextrasize = int(inheaderdata[30], 16)
outfextrafields = int(inheaderdata[31], 16)
extrafieldslist = []
extrastart = 30
extrastart = 32
extraend = extrastart + outfextrafields
outfcs = inheaderdata[-2].lower()
outfccs = inheaderdata[-1].lower()
Expand Down Expand Up @@ -8475,14 +8487,16 @@ def ArchiveFileToArray(infile, fmttype="auto", seekstart=0, seekend=0, listonly=
prefcompression = preheaderdata[14]
prefcsize = int(preheaderdata[15], 16)
prefseeknextfile = preheaderdata[26]
prefjsonsize = int(preheaderdata[27], 16)
prefjsontype = preheaderdata[27]
prefjsonlen = int(preheaderdata[28], 16)
prefjsonsize = int(preheaderdata[29], 16)
prefjoutfprejsoncontent = fp.read(prefjsonsize).decode("UTF-8")
if(prefjsonsize <= 0):
prefjoutfprejsoncontent = ""
prefjoutfprejsoncontent = "".encode()
fp.seek(len(formatspecs['format_delimiter']), 1)
prefextrasize = int(preheaderdata[28], 16)
prefextrafields = int(preheaderdata[29], 16)
extrastart = 30
prefextrasize = int(preheaderdata[30], 16)
prefextrafields = int(preheaderdata[31], 16)
extrastart = 32
extraend = extrastart + prefextrafields
prefcs = preheaderdata[-2].lower()
prenewfcs = preheaderdata[-1].lower()
Expand Down Expand Up @@ -8577,7 +8591,9 @@ def ArchiveFileToArray(infile, fmttype="auto", seekstart=0, seekend=0, listonly=
outfdev_minor = int(inheaderdata[24], 16)
outfdev_major = int(inheaderdata[25], 16)
outfseeknextfile = inheaderdata[26]
outfjsonsize = int(inheaderdata[27], 16)
outfjsontype = inheaderdata[27]
outfjsonlen = int(inheaderdata[28], 16)
outfjsonsize = int(inheaderdata[29], 16)
outfjsoncontent = {}
outfprejsoncontent = fp.read(outfjsonsize).decode("UTF-8")
if(outfjsonsize > 0):
Expand All @@ -8593,10 +8609,10 @@ def ArchiveFileToArray(infile, fmttype="auto", seekstart=0, seekend=0, listonly=
outfprejsoncontent = ""
outfjsoncontent = {}
fp.seek(len(formatspecs['format_delimiter']), 1)
outfextrasize = int(inheaderdata[28], 16)
outfextrafields = int(inheaderdata[29], 16)
outfextrasize = int(inheaderdata[30], 16)
outfextrafields = int(inheaderdata[31], 16)
extrafieldslist = []
extrastart = 30
extrastart = 32
extraend = extrastart + outfextrafields
while(extrastart < extraend):
extrafieldslist.append(inheaderdata[extrastart])
Expand Down Expand Up @@ -8680,7 +8696,7 @@ def ArchiveFileToArray(infile, fmttype="auto", seekstart=0, seekend=0, listonly=
outfcontents.seek(0, 0)
if(not contentasfile):
outfcontents = outfcontents.read()
outlist['ffilelist'].append({'fid': realidnum, 'fidalt': fileidnum, 'fheadersize': outfheadsize, 'fhstart': outfhstart, 'fhend': outfhend, 'ftype': outftype, 'fencoding': outfencoding, 'fcencoding': outfcencoding, 'fname': outfname, 'fbasedir': outfbasedir, 'flinkname': outflinkname, 'fsize': outfsize, 'fatime': outfatime, 'fmtime': outfmtime, 'fctime': outfctime, 'fbtime': outfbtime, 'fmode': outfmode, 'fchmode': outfchmode, 'ftypemod': outftypemod, 'fwinattributes': outfwinattributes, 'fcompression': outfcompression, 'fcsize': outfcsize, 'fuid': outfuid, 'funame': outfuname, 'fgid': outfgid, 'fgname': outfgname, 'finode': outfinode, 'flinkcount': outflinkcount, 'fdev': outfdev, 'fminor': outfdev_minor, 'fmajor': outfdev_major, 'fseeknextfile': outfseeknextfile, 'fheaderchecksumtype': inheaderdata[-4], 'fcontentchecksumtype': inheaderdata[-3], 'fnumfields': outfnumfields + 2, 'frawheader': inheaderdata, 'fextrafields': outfextrafields, 'fextrafieldsize': outfextrasize, 'fextradata': extrafieldslist, 'fjsondata': outfjsoncontent, 'fheaderchecksum': outfcs, 'fcontentchecksum': outfccs, 'fhascontents': pyhascontents, 'fcontentstart': outfcontentstart, 'fcontentend': outfcontentend, 'fcontentasfile': contentasfile, 'fcontents': outfcontents})
outlist['ffilelist'].append({'fid': realidnum, 'fidalt': fileidnum, 'fheadersize': outfheadsize, 'fhstart': outfhstart, 'fhend': outfhend, 'ftype': outftype, 'fencoding': outfencoding, 'fcencoding': outfcencoding, 'fname': outfname, 'fbasedir': outfbasedir, 'flinkname': outflinkname, 'fsize': outfsize, 'fatime': outfatime, 'fmtime': outfmtime, 'fctime': outfctime, 'fbtime': outfbtime, 'fmode': outfmode, 'fchmode': outfchmode, 'ftypemod': outftypemod, 'fwinattributes': outfwinattributes, 'fcompression': outfcompression, 'fcsize': outfcsize, 'fuid': outfuid, 'funame': outfuname, 'fgid': outfgid, 'fgname': outfgname, 'finode': outfinode, 'flinkcount': outflinkcount, 'fdev': outfdev, 'fminor': outfdev_minor, 'fmajor': outfdev_major, 'fseeknextfile': outfseeknextfile, 'fheaderchecksumtype': inheaderdata[-4], 'fcontentchecksumtype': inheaderdata[-3], 'fnumfields': outfnumfields + 2, 'frawheader': inheaderdata, 'fextrafields': outfextrafields, 'fextrafieldsize': outfextrasize, 'fextradata': extrafieldslist, 'fjsontype': outfjsontype, 'fjsonlen': outfjsonlen, 'fjsonsize': outfjsonsize, 'fjsondata': outfjsoncontent, 'fheaderchecksum': outfcs, 'fcontentchecksum': outfccs, 'fhascontents': pyhascontents, 'fcontentstart': outfcontentstart, 'fcontentend': outfcontentend, 'fcontentasfile': contentasfile, 'fcontents': outfcontents})
fileidnum = fileidnum + 1
realidnum = realidnum + 1
if(returnfp):
Expand Down

0 comments on commit 9b1736d

Please sign in to comment.