Split xref table and stream code and tests

sign/pdfxref.go
@@ -2,26 +2,18 @@ package sign
import (
    "bytes"
    "compress/zlib"
    "encoding/binary"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
)

type xrefEntry struct {
    ID         uint32
    Offset     int64
    Generation int
    Free       bool
}

const (
    xrefStreamColumns   = 6  // Column width (1+4+1)
    xrefStreamPredictor = 12
    defaultPredictor    = 1  // No prediction (the default value)
    pngSubPredictor     = 11 // PNG prediction (on encoding, PNG Sub on all rows)
    pngUpPredictor      = 12 // PNG prediction (on encoding, PNG Up on all rows)
    objectFooter        = "\nendobj\n"
)

func (context *SignContext) addObject(object []byte) (uint32, error) {
@@ -117,257 +109,3 @@ func (context *SignContext) getLastObjectIDFromXref() (uint32, error) {
    return maxID + 1, nil
}

// writeIncrXrefTable writes the incremental cross-reference table to the output buffer.
func (context *SignContext) writeIncrXrefTable() error {
    // Write xref header
    if _, err := context.OutputBuffer.Write([]byte("xref\n")); err != nil {
        return fmt.Errorf("failed to write incremental xref header: %w", err)
    }

    // Write updated entries
    for _, entry := range context.updatedXrefEntries {
        pageXrefObj := fmt.Sprintf("%d %d\n", entry.ID, 1)
        if _, err := context.OutputBuffer.Write([]byte(pageXrefObj)); err != nil {
            return fmt.Errorf("failed to write updated xref object: %w", err)
        }

        xrefLine := fmt.Sprintf("%010d 00000 n\r\n", entry.Offset)
        if _, err := context.OutputBuffer.Write([]byte(xrefLine)); err != nil {
            return fmt.Errorf("failed to write updated incremental xref entry: %w", err)
        }
    }

    // Write xref subsection header
    startXrefObj := fmt.Sprintf("%d %d\n", context.lastXrefID+1, len(context.newXrefEntries))
    if _, err := context.OutputBuffer.Write([]byte(startXrefObj)); err != nil {
        return fmt.Errorf("failed to write starting xref object: %w", err)
    }

    // Write new entries
    for _, entry := range context.newXrefEntries {
        xrefLine := fmt.Sprintf("%010d 00000 n\r\n", entry.Offset)
        if _, err := context.OutputBuffer.Write([]byte(xrefLine)); err != nil {
            return fmt.Errorf("failed to write incremental xref entry: %w", err)
        }
    }

    return nil
}

// writeXrefStream writes the cross-reference stream to the output buffer.
func (context *SignContext) writeXrefStream() error {
    var buffer bytes.Buffer

    predictor := context.PDFReader.Trailer().Key("DecodeParms").Key("Predictor").Int64()
    if predictor == 0 {
        predictor = xrefStreamPredictor
    }

    if err := writeXrefStreamEntries(&buffer, context); err != nil {
        return fmt.Errorf("failed to write xref stream entries: %w", err)
    }

    streamBytes, err := encodeXrefStream(buffer.Bytes(), predictor)
    if err != nil {
        return fmt.Errorf("failed to encode xref stream: %w", err)
    }

    var xrefStreamObject bytes.Buffer

    if err := writeXrefStreamHeader(&xrefStreamObject, context, len(streamBytes)); err != nil {
        return fmt.Errorf("failed to write xref stream header: %w", err)
    }

    if err := writeXrefStreamContent(&xrefStreamObject, streamBytes); err != nil {
        return fmt.Errorf("failed to write xref stream content: %w", err)
    }

    _, err = context.addObject(xrefStreamObject.Bytes())
    if err != nil {
        return fmt.Errorf("failed to add xref stream object: %w", err)
    }

    return nil
}

// writeXrefStreamEntries writes the individual entries for the xref stream.
func writeXrefStreamEntries(buffer *bytes.Buffer, context *SignContext) error {
    // Write updated entries first
    for _, entry := range context.updatedXrefEntries {
        writeXrefStreamLine(buffer, 1, int(entry.Offset), 0)
    }

    // Write new entries
    for _, entry := range context.newXrefEntries {
        writeXrefStreamLine(buffer, 1, int(entry.Offset), 0)
    }

    return nil
}

// encodeXrefStream applies the appropriate encoding to the xref stream.
func encodeXrefStream(data []byte, predictor int64) ([]byte, error) {
    // Use FlateDecode without prediction for xref streams
    var b bytes.Buffer
    w := zlib.NewWriter(&b)
    if _, err := w.Write(data); err != nil {
        return nil, err
    }
    w.Close()
    return b.Bytes(), nil
}

// writeXrefStreamHeader writes the header for the xref stream.
func writeXrefStreamHeader(buffer *bytes.Buffer, context *SignContext, streamLength int) error {
    id := context.PDFReader.Trailer().Key("ID")

    // Calculate total entries and create index array
    totalEntries := uint32(context.PDFReader.XrefInformation.ItemCount)
    var indexArray []uint32

    // Add existing entries section
    if len(context.updatedXrefEntries) > 0 {
        for _, entry := range context.updatedXrefEntries {
            indexArray = append(indexArray, entry.ID, 1)
        }
    }

    // Add new entries section
    if len(context.newXrefEntries) > 0 {
        indexArray = append(indexArray, context.lastXrefID+1, uint32(len(context.newXrefEntries)))
        totalEntries += uint32(len(context.newXrefEntries))
    }

    buffer.WriteString("<< /Type /XRef\n")
    buffer.WriteString(fmt.Sprintf(" /Length %d\n", streamLength))
    buffer.WriteString(" /Filter /FlateDecode\n")
    // Change W array to [1 4 1] to accommodate larger offsets
    buffer.WriteString(" /W [ 1 4 1 ]\n")
    buffer.WriteString(fmt.Sprintf(" /Prev %d\n", context.PDFReader.XrefInformation.StartPos))
    buffer.WriteString(fmt.Sprintf(" /Size %d\n", totalEntries+1))

    // Write index array if we have entries
    if len(indexArray) > 0 {
        buffer.WriteString(" /Index [")
        for _, idx := range indexArray {
            buffer.WriteString(fmt.Sprintf(" %d", idx))
        }
        buffer.WriteString(" ]\n")
    }

    buffer.WriteString(fmt.Sprintf(" /Root %d 0 R\n", context.CatalogData.ObjectId))

    if !id.IsNull() {
        id0 := hex.EncodeToString([]byte(id.Index(0).RawString()))
        id1 := hex.EncodeToString([]byte(id.Index(1).RawString()))
        buffer.WriteString(fmt.Sprintf(" /ID [<%s><%s>]\n", id0, id1))
    }

    buffer.WriteString(">>\n")
    return nil
}

// writeXrefStreamContent writes the content of the xref stream.
func writeXrefStreamContent(buffer *bytes.Buffer, streamBytes []byte) error {
    if _, err := io.WriteString(buffer, "stream\n"); err != nil {
        return err
    }

    if _, err := buffer.Write(streamBytes); err != nil {
        return err
    }

    if _, err := io.WriteString(buffer, "\nendstream\n"); err != nil {
        return err
    }

    return nil
}

// writeXrefStreamLine writes a single line in the xref stream.
func writeXrefStreamLine(b *bytes.Buffer, xreftype byte, offset int, gen byte) {
    // Write type (1 byte)
    b.WriteByte(xreftype)

    // Write offset (4 bytes)
    offsetBytes := make([]byte, 4)
    binary.BigEndian.PutUint32(offsetBytes, uint32(offset))
    b.Write(offsetBytes)

    // Write generation (1 byte)
    b.WriteByte(gen)
}

// EncodePNGSUBBytes encodes data using PNG SUB filter.
func EncodePNGSUBBytes(columns int, data []byte) ([]byte, error) {
    rowCount := len(data) / columns
    if len(data)%columns != 0 {
        return nil, errors.New("invalid row/column length")
    }

    buffer := bytes.NewBuffer(nil)
    tmpRowData := make([]byte, columns)
    for i := 0; i < rowCount; i++ {
        rowData := data[columns*i : columns*(i+1)]
        tmpRowData[0] = rowData[0]
        for j := 1; j < columns; j++ {
            tmpRowData[j] = byte(int(rowData[j]-rowData[j-1]) % 256)
        }

        buffer.WriteByte(1)
        buffer.Write(tmpRowData)
    }

    data = buffer.Bytes()

    var b bytes.Buffer
    w := zlib.NewWriter(&b)
    if _, err := w.Write(data); err != nil {
        return nil, err
    }
    w.Close()

    return b.Bytes(), nil
}

// EncodePNGUPBytes encodes data using PNG UP filter.
func EncodePNGUPBytes(columns int, data []byte) ([]byte, error) {
    rowCount := len(data) / columns
    if len(data)%columns != 0 {
        return nil, errors.New("invalid row/column length")
    }

    prevRowData := make([]byte, columns)

    // Initially all previous data is zero.
    for i := 0; i < columns; i++ {
        prevRowData[i] = 0
    }

    buffer := bytes.NewBuffer(nil)
    tmpRowData := make([]byte, columns)
    for i := 0; i < rowCount; i++ {
        rowData := data[columns*i : columns*(i+1)]
        for j := 0; j < columns; j++ {
            tmpRowData[j] = byte(int(rowData[j]-prevRowData[j]) % 256)
        }

        // Save the previous row for prediction.
        copy(prevRowData, rowData)

        buffer.WriteByte(2)
        buffer.Write(tmpRowData)
    }

    data = buffer.Bytes()

    var b bytes.Buffer
    w := zlib.NewWriter(&b)
    if _, err := w.Write(data); err != nil {
        return nil, err
    }
    w.Close()

    return b.Bytes(), nil
}

sign/pdfxref_stream.go (new file)
@@ -0,0 +1,236 @@
package sign

import (
    "bytes"
    "compress/zlib"
    "encoding/binary"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
)

const (
    xrefStreamColumns   = 6  // Column width (1+4+1)
    xrefStreamPredictor = 12
    defaultPredictor    = 1  // No prediction (the default value)
    pngSubPredictor     = 11 // PNG prediction (on encoding, PNG Sub on all rows)
    pngUpPredictor      = 12 // PNG prediction (on encoding, PNG Up on all rows)
)

// writeXrefStream writes the cross-reference stream to the output buffer.
func (context *SignContext) writeXrefStream() error {
    var buffer bytes.Buffer

    predictor := context.PDFReader.Trailer().Key("DecodeParms").Key("Predictor").Int64()
    if predictor == 0 {
        predictor = xrefStreamPredictor
    }

    if err := writeXrefStreamEntries(&buffer, context); err != nil {
        return fmt.Errorf("failed to write xref stream entries: %w", err)
    }

    streamBytes, err := encodeXrefStream(buffer.Bytes(), predictor)
    if err != nil {
        return fmt.Errorf("failed to encode xref stream: %w", err)
    }

    var xrefStreamObject bytes.Buffer

    if err := writeXrefStreamHeader(&xrefStreamObject, context, len(streamBytes)); err != nil {
        return fmt.Errorf("failed to write xref stream header: %w", err)
    }

    if err := writeXrefStreamContent(&xrefStreamObject, streamBytes); err != nil {
        return fmt.Errorf("failed to write xref stream content: %w", err)
    }

    _, err = context.addObject(xrefStreamObject.Bytes())
    if err != nil {
        return fmt.Errorf("failed to add xref stream object: %w", err)
    }

    return nil
}

// writeXrefStreamEntries writes the individual entries for the xref stream.
func writeXrefStreamEntries(buffer *bytes.Buffer, context *SignContext) error {
    // Write updated entries first
    for _, entry := range context.updatedXrefEntries {
        writeXrefStreamLine(buffer, 1, int(entry.Offset), 0)
    }

    // Write new entries
    for _, entry := range context.newXrefEntries {
        writeXrefStreamLine(buffer, 1, int(entry.Offset), 0)
    }

    return nil
}

// encodeXrefStream applies the appropriate encoding to the xref stream.
func encodeXrefStream(data []byte, predictor int64) ([]byte, error) {
    // Use FlateDecode without prediction for xref streams
    var b bytes.Buffer
    w := zlib.NewWriter(&b)
    if _, err := w.Write(data); err != nil {
        return nil, err
    }
    w.Close()
    return b.Bytes(), nil
}

// writeXrefStreamHeader writes the header for the xref stream.
func writeXrefStreamHeader(buffer *bytes.Buffer, context *SignContext, streamLength int) error {
    id := context.PDFReader.Trailer().Key("ID")

    // Calculate total entries and create index array
    totalEntries := uint32(context.PDFReader.XrefInformation.ItemCount)
    var indexArray []uint32

    // Add existing entries section
    if len(context.updatedXrefEntries) > 0 {
        for _, entry := range context.updatedXrefEntries {
            indexArray = append(indexArray, entry.ID, 1)
        }
    }

    // Add new entries section
    if len(context.newXrefEntries) > 0 {
        indexArray = append(indexArray, context.lastXrefID+1, uint32(len(context.newXrefEntries)))
        totalEntries += uint32(len(context.newXrefEntries))
    }

    buffer.WriteString("<< /Type /XRef\n")
    buffer.WriteString(fmt.Sprintf(" /Length %d\n", streamLength))
    buffer.WriteString(" /Filter /FlateDecode\n")
    // Change W array to [1 4 1] to accommodate larger offsets
    buffer.WriteString(" /W [ 1 4 1 ]\n")
    buffer.WriteString(fmt.Sprintf(" /Prev %d\n", context.PDFReader.XrefInformation.StartPos))
    buffer.WriteString(fmt.Sprintf(" /Size %d\n", totalEntries+1))

    // Write index array if we have entries
    if len(indexArray) > 0 {
        buffer.WriteString(" /Index [")
        for _, idx := range indexArray {
            buffer.WriteString(fmt.Sprintf(" %d", idx))
        }
        buffer.WriteString(" ]\n")
    }

    buffer.WriteString(fmt.Sprintf(" /Root %d 0 R\n", context.CatalogData.ObjectId))

    if !id.IsNull() {
        id0 := hex.EncodeToString([]byte(id.Index(0).RawString()))
        id1 := hex.EncodeToString([]byte(id.Index(1).RawString()))
        buffer.WriteString(fmt.Sprintf(" /ID [<%s><%s>]\n", id0, id1))
    }

    buffer.WriteString(">>\n")
    return nil
}

// writeXrefStreamContent writes the content of the xref stream.
func writeXrefStreamContent(buffer *bytes.Buffer, streamBytes []byte) error {
    if _, err := io.WriteString(buffer, "stream\n"); err != nil {
        return err
    }

    if _, err := buffer.Write(streamBytes); err != nil {
        return err
    }

    if _, err := io.WriteString(buffer, "\nendstream\n"); err != nil {
        return err
    }

    return nil
}

// writeXrefStreamLine writes a single line in the xref stream.
func writeXrefStreamLine(b *bytes.Buffer, xreftype byte, offset int, gen byte) {
    // Write type (1 byte)
    b.WriteByte(xreftype)

    // Write offset (4 bytes)
    offsetBytes := make([]byte, 4)
    binary.BigEndian.PutUint32(offsetBytes, uint32(offset))
    b.Write(offsetBytes)

    // Write generation (1 byte)
    b.WriteByte(gen)
}

// EncodePNGSUBBytes encodes data using PNG SUB filter.
func EncodePNGSUBBytes(columns int, data []byte) ([]byte, error) {
    rowCount := len(data) / columns
    if len(data)%columns != 0 {
        return nil, errors.New("invalid row/column length")
    }

    buffer := bytes.NewBuffer(nil)
    tmpRowData := make([]byte, columns)
    for i := 0; i < rowCount; i++ {
        rowData := data[columns*i : columns*(i+1)]
        tmpRowData[0] = rowData[0]
        for j := 1; j < columns; j++ {
            tmpRowData[j] = byte(int(rowData[j]-rowData[j-1]) % 256)
        }

        buffer.WriteByte(1)
        buffer.Write(tmpRowData)
    }

    data = buffer.Bytes()

    var b bytes.Buffer
    w := zlib.NewWriter(&b)
    if _, err := w.Write(data); err != nil {
        return nil, err
    }
    w.Close()

    return b.Bytes(), nil
}

// EncodePNGUPBytes encodes data using PNG UP filter.
func EncodePNGUPBytes(columns int, data []byte) ([]byte, error) {
    rowCount := len(data) / columns
    if len(data)%columns != 0 {
        return nil, errors.New("invalid row/column length")
    }

    prevRowData := make([]byte, columns)

    // Initially all previous data is zero.
    for i := 0; i < columns; i++ {
        prevRowData[i] = 0
    }

    buffer := bytes.NewBuffer(nil)
    tmpRowData := make([]byte, columns)
    for i := 0; i < rowCount; i++ {
        rowData := data[columns*i : columns*(i+1)]
        for j := 0; j < columns; j++ {
            tmpRowData[j] = byte(int(rowData[j]-prevRowData[j]) % 256)
        }

        // Save the previous row for prediction.
        copy(prevRowData, rowData)

        buffer.WriteByte(2)
        buffer.Write(tmpRowData)
    }

    data = buffer.Bytes()

    var b bytes.Buffer
    w := zlib.NewWriter(&b)
    if _, err := w.Write(data); err != nil {
        return nil, err
    }
    w.Close()

    return b.Bytes(), nil
}

sign/pdfxref_stream_test.go (new file)
@@ -0,0 +1,219 @@
package sign

import (
    "bytes"
    "compress/zlib"
    "io"
    "os"
    "strings"
    "testing"

    "github.com/digitorus/pdf"
    "github.com/mattetti/filebuffer"
)

func TestWriteXrefStreamLine(t *testing.T) {
    tests := []struct {
        name     string
        xreftype byte
        offset   int
        gen      byte
        expected []byte
    }{
        {
            name:     "basic entry",
            xreftype: 1,
            offset:   1234,
            gen:      0,
            expected: []byte{1, 0, 0, 4, 210, 0},
        },
        {
            name:     "zero entry",
            xreftype: 0,
            offset:   0,
            gen:      0,
            expected: []byte{0, 0, 0, 0, 0, 0},
        },
        {
            name:     "max offset",
            xreftype: 1,
            offset:   16777215, // 2^24 - 1
            gen:      255,
            expected: []byte{1, 0, 255, 255, 255, 255},
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            var buf bytes.Buffer
            writeXrefStreamLine(&buf, tt.xreftype, tt.offset, tt.gen)
            result := buf.Bytes()
            if !bytes.Equal(result, tt.expected) {
                t.Errorf("writeXrefStreamLine() = %v, want %v", result, tt.expected)
                t.Errorf("hex: got %x, want %x", result, tt.expected)
            }
            if len(result) != xrefStreamColumns {
                t.Errorf("incorrect length: got %d bytes, want %d bytes", len(result), xrefStreamColumns)
            }
        })
    }
}

func TestEncodePNGSUBBytes(t *testing.T) {
    tests := []struct {
        name     string
        columns  int
        input    []byte
        expected []byte
        wantErr  bool
    }{
        {
            name:     "valid encoding",
            columns:  3,
            input:    []byte{10, 20, 30, 40, 50, 60},
            expected: []byte{1, 10, 10, 10, 1, 40, 10, 10},
            wantErr:  false,
        },
        {
            name:    "invalid columns",
            columns: 4,
            input:   []byte{1, 2, 3, 4, 5},
            wantErr: true,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := EncodePNGSUBBytes(tt.columns, tt.input)
            if (err != nil) != tt.wantErr {
                t.Errorf("EncodePNGSUBBytes() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !tt.wantErr {
                if got == nil {
                    t.Error("EncodePNGSUBBytes() returned nil for valid input")
                }
                // Decompress the result
                r, err := zlib.NewReader(bytes.NewReader(got))
                if err != nil {
                    t.Fatalf("Failed to create zlib reader: %v", err)
                }
                defer r.Close()

                decompressed, err := io.ReadAll(r)
                if err != nil {
                    t.Fatalf("Failed to decompress: %v", err)
                }

                if !bytes.Equal(decompressed, tt.expected) {
                    t.Errorf("EncodePNGSUBBytes() = %v, want %v", decompressed, tt.expected)
                }
            }
        })
    }
}

func TestEncodePNGUPBytes(t *testing.T) {
    tests := []struct {
        name     string
        columns  int
        input    []byte
        expected []byte
        wantErr  bool
    }{
        {
            name:     "valid encoding",
            columns:  3,
            input:    []byte{10, 20, 30, 40, 50, 60},
            expected: []byte{2, 10, 20, 30, 2, 30, 30, 30},
            wantErr:  false,
        },
        {
            name:    "invalid columns",
            columns: 4,
            input:   []byte{1, 2, 3, 4, 5},
            wantErr: true,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := EncodePNGUPBytes(tt.columns, tt.input)
            if (err != nil) != tt.wantErr {
                t.Errorf("EncodePNGUPBytes() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !tt.wantErr {
                if got == nil {
                    t.Error("EncodePNGUPBytes() returned nil for valid input")
                }
                // Decompress the result
                r, err := zlib.NewReader(bytes.NewReader(got))
                if err != nil {
                    t.Fatalf("Failed to create zlib reader: %v", err)
                }
                defer r.Close()

                decompressed, err := io.ReadAll(r)
                if err != nil {
                    t.Fatalf("Failed to decompress: %v", err)
                }

                if !bytes.Equal(decompressed, tt.expected) {
                    t.Errorf("EncodePNGUPBytes() = %v, want %v", decompressed, tt.expected)
                }
            }
        })
    }
}

func TestWriteXrefStream(t *testing.T) {
    input_file, err := os.Open("../testfiles/testfile12.pdf")
    if err != nil {
        t.Fatalf("Failed to open test file: %v", err)
    }
    defer input_file.Close()

    finfo, err := input_file.Stat()
    if err != nil {
        t.Fatalf("Failed to get file info: %v", err)
    }

    r, err := pdf.NewReader(input_file, finfo.Size())
    if err != nil {
        t.Fatalf("Failed to create PDF reader: %v", err)
    }

    outputBuf := &filebuffer.Buffer{
        Buff: new(bytes.Buffer),
    }
    context := &SignContext{
        InputFile:    input_file,
        PDFReader:    r,
        OutputBuffer: outputBuf,
        newXrefEntries: []xrefEntry{
            {ID: 1, Offset: 100},
        },
    }

    err = context.writeXrefStream()
    if err != nil {
        t.Errorf("writeXrefStream() error = %v", err)
    }

    // Check if output contains required xref stream elements
    output := outputBuf.Buff.String()
    requiredElements := []string{
        "/Type /XRef",
        "/Filter /FlateDecode",
        "/W [ 1 4 1 ]",
        "stream\n",
        "endstream",
    }

    for _, elem := range requiredElements {
        if !strings.Contains(output, elem) {
            t.Errorf("Output missing required element: %s", elem)
        }
    }
}

sign/pdfxref_table.go (new file)
@@ -0,0 +1,42 @@
package sign

import (
    "fmt"
)

// writeIncrXrefTable writes the incremental cross-reference table to the output buffer.
func (context *SignContext) writeIncrXrefTable() error {
    // Write xref header
    if _, err := context.OutputBuffer.Write([]byte("xref\n")); err != nil {
        return fmt.Errorf("failed to write incremental xref header: %w", err)
    }

    // Write updated entries
    for _, entry := range context.updatedXrefEntries {
        pageXrefObj := fmt.Sprintf("%d %d\n", entry.ID, 1)
        if _, err := context.OutputBuffer.Write([]byte(pageXrefObj)); err != nil {
            return fmt.Errorf("failed to write updated xref object: %w", err)
        }

        xrefLine := fmt.Sprintf("%010d 00000 n\r\n", entry.Offset)
        if _, err := context.OutputBuffer.Write([]byte(xrefLine)); err != nil {
            return fmt.Errorf("failed to write updated incremental xref entry: %w", err)
        }
    }

    // Write xref subsection header
    startXrefObj := fmt.Sprintf("%d %d\n", context.lastXrefID+1, len(context.newXrefEntries))
    if _, err := context.OutputBuffer.Write([]byte(startXrefObj)); err != nil {
        return fmt.Errorf("failed to write starting xref object: %w", err)
    }

    // Write new entries
    for _, entry := range context.newXrefEntries {
        xrefLine := fmt.Sprintf("%010d 00000 n\r\n", entry.Offset)
        if _, err := context.OutputBuffer.Write([]byte(xrefLine)); err != nil {
            return fmt.Errorf("failed to write incremental xref entry: %w", err)
        }
    }

    return nil
}

sign/pdfxref_table_test.go (new file)
@@ -0,0 +1,47 @@
package sign

import (
    "bytes"
    "testing"

    "github.com/mattetti/filebuffer"
)

func TestWriteIncrXrefTable(t *testing.T) {
    // Test setup
    context := &SignContext{
        OutputBuffer: &filebuffer.Buffer{
            Buff: new(bytes.Buffer),
        },
        lastXrefID: 100,
        updatedXrefEntries: []xrefEntry{
            {ID: 50, Offset: 1234},
            {ID: 51, Offset: 5678},
        },
        newXrefEntries: []xrefEntry{
            {ID: 101, Offset: 9012},
            {ID: 102, Offset: 3456},
        },
    }

    // Execute test
    err := context.writeIncrXrefTable()
    if err != nil {
        t.Fatalf("writeIncrXrefTable failed: %v", err)
    }

    // Verify output
    expected := "xref\n" +
        "50 1\n" +
        "0000001234 00000 n\r\n" +
        "51 1\n" +
        "0000005678 00000 n\r\n" +
        "101 2\n" +
        "0000009012 00000 n\r\n" +
        "0000003456 00000 n\r\n"

    got := context.OutputBuffer.Buff.String()
    if got != expected {
        t.Errorf("writeIncrXrefTable output mismatch\ngot:\n%s\nwant:\n%s", got, expected)
    }
}

sign/pdfxref_test.go
@@ -2,8 +2,6 @@ package sign
import (
    "bytes"
    "compress/zlib"
    "io"
    "os"
    "strings"
    "testing"
@@ -61,212 +59,6 @@ func TestGetLastObjectIDFromXref(t *testing.T) {
    }
}

func TestWriteXrefStreamLine(t *testing.T) {
    tests := []struct {
        name     string
        xreftype byte
        offset   int
        gen      byte
        expected []byte
    }{
        {
            name:     "basic entry",
            xreftype: 1,
            offset:   1234,
            gen:      0,
            expected: []byte{1, 0, 0, 4, 210, 0},
        },
        {
            name:     "zero entry",
            xreftype: 0,
            offset:   0,
            gen:      0,
            expected: []byte{0, 0, 0, 0, 0, 0},
        },
        {
            name:     "max offset",
            xreftype: 1,
            offset:   16777215, // 2^24 - 1
            gen:      255,
            expected: []byte{1, 0, 255, 255, 255, 255},
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            var buf bytes.Buffer
            writeXrefStreamLine(&buf, tt.xreftype, tt.offset, tt.gen)
            result := buf.Bytes()
            if !bytes.Equal(result, tt.expected) {
                t.Errorf("writeXrefStreamLine() = %v, want %v", result, tt.expected)
                t.Errorf("hex: got %x, want %x", result, tt.expected)
            }
            if len(result) != xrefStreamColumns {
                t.Errorf("incorrect length: got %d bytes, want %d bytes", len(result), xrefStreamColumns)
            }
        })
    }
}

func TestEncodePNGSUBBytes(t *testing.T) {
    tests := []struct {
        name     string
        columns  int
        input    []byte
        expected []byte
        wantErr  bool
    }{
        {
            name:     "valid encoding",
            columns:  3,
            input:    []byte{10, 20, 30, 40, 50, 60},
            expected: []byte{1, 10, 10, 10, 1, 40, 10, 10},
            wantErr:  false,
        },
        {
            name:    "invalid columns",
            columns: 4,
            input:   []byte{1, 2, 3, 4, 5},
            wantErr: true,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := EncodePNGSUBBytes(tt.columns, tt.input)
            if (err != nil) != tt.wantErr {
                t.Errorf("EncodePNGSUBBytes() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !tt.wantErr {
                if got == nil {
                    t.Error("EncodePNGSUBBytes() returned nil for valid input")
                }
                // Decompress the result
                r, err := zlib.NewReader(bytes.NewReader(got))
                if err != nil {
                    t.Fatalf("Failed to create zlib reader: %v", err)
                }
                defer r.Close()

                decompressed, err := io.ReadAll(r)
                if err != nil {
                    t.Fatalf("Failed to decompress: %v", err)
                }

                if !bytes.Equal(decompressed, tt.expected) {
                    t.Errorf("EncodePNGSUBBytes() = %v, want %v", decompressed, tt.expected)
                }
            }
        })
    }
}

func TestEncodePNGUPBytes(t *testing.T) {
    tests := []struct {
        name     string
        columns  int
        input    []byte
        expected []byte
        wantErr  bool
    }{
        {
            name:     "valid encoding",
            columns:  3,
            input:    []byte{10, 20, 30, 40, 50, 60},
            expected: []byte{2, 10, 20, 30, 2, 30, 30, 30},
            wantErr:  false,
        },
        {
            name:    "invalid columns",
            columns: 4,
            input:   []byte{1, 2, 3, 4, 5},
            wantErr: true,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            got, err := EncodePNGUPBytes(tt.columns, tt.input)
            if (err != nil) != tt.wantErr {
                t.Errorf("EncodePNGUPBytes() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !tt.wantErr {
                if got == nil {
                    t.Error("EncodePNGUPBytes() returned nil for valid input")
                }
                // Decompress the result
                r, err := zlib.NewReader(bytes.NewReader(got))
                if err != nil {
                    t.Fatalf("Failed to create zlib reader: %v", err)
                }
                defer r.Close()

                decompressed, err := io.ReadAll(r)
                if err != nil {
                    t.Fatalf("Failed to decompress: %v", err)
                }

                if !bytes.Equal(decompressed, tt.expected) {
                    t.Errorf("EncodePNGUPBytes() = %v, want %v", decompressed, tt.expected)
                }
            }
        })
    }
}

func TestWriteXrefStream(t *testing.T) {
    input_file, err := os.Open("../testfiles/testfile12.pdf")
    if err != nil {
        t.Fatalf("Failed to open test file: %v", err)
    }
    defer input_file.Close()

    finfo, err := input_file.Stat()
    if err != nil {
        t.Fatalf("Failed to get file info: %v", err)
    }

    r, err := pdf.NewReader(input_file, finfo.Size())
    if err != nil {
        t.Fatalf("Failed to create PDF reader: %v", err)
    }

    outputBuf := &filebuffer.Buffer{
        Buff: new(bytes.Buffer),
    }
    context := &SignContext{
        InputFile:    input_file,
        PDFReader:    r,
        OutputBuffer: outputBuf,
        newXrefEntries: []xrefEntry{
            {ID: 1, Offset: 100},
        },
    }

    err = context.writeXrefStream()
    if err != nil {
        t.Errorf("writeXrefStream() error = %v", err)
    }

    // Check if output contains required xref stream elements
    output := outputBuf.Buff.String()
    requiredElements := []string{
        "/Type /XRef",
        "/Filter /FlateDecode",
        "/W [ 1 4 1 ]",
        "stream\n",
        "endstream",
    }

    for _, elem := range requiredElements {
        if !strings.Contains(output, elem) {
            t.Errorf("Output missing required element: %s", elem)
        }
    }
}

func TestAddObject(t *testing.T) {
    outputBuf := &filebuffer.Buffer{
        Buff: new(bytes.Buffer),
@@ -328,3 +120,172 @@ func TestAddObject(t *testing.T) {
        })
    }
}

func TestUpdateObject(t *testing.T) {
    context := &SignContext{
        OutputBuffer: &filebuffer.Buffer{
            Buff: new(bytes.Buffer),
        },
        lastXrefID: 10,
    }

    tests := []struct {
        name         string
        objectID     uint32
        object       []byte
        expectedText string
        wantErr      bool
    }{
        {
            name:         "valid update",
            objectID:     5,
            object:       []byte("updated content"),
            expectedText: "5 0 obj\nupdated content\nendobj\n",
            wantErr:      false,
        },
        {
            name:         "update with whitespace",
            objectID:     8,
            object:       []byte(" updated content "),
            expectedText: "8 0 obj\nupdated content\nendobj\n",
            wantErr:      false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            context.OutputBuffer.Buff.Reset()
            err := context.updateObject(tt.objectID, tt.object)
            if (err != nil) != tt.wantErr {
                t.Errorf("updateObject() error = %v, wantErr %v", err, tt.wantErr)
                return
            }

            got := context.OutputBuffer.Buff.String()
            if !strings.Contains(got, tt.expectedText) {
                t.Errorf("updateObject() output = %q, want to contain %q", got, tt.expectedText)
            }

            // Check xref entry
            if len(context.updatedXrefEntries) == 0 {
                t.Error("No updated xref entry added")
            } else {
                lastEntry := context.updatedXrefEntries[len(context.updatedXrefEntries)-1]
                if lastEntry.ID != tt.objectID {
                    t.Errorf("xref entry ID = %v, want %v", lastEntry.ID, tt.objectID)
                }
            }
        })
    }
}

func TestWriteObject(t *testing.T) {
    context := &SignContext{
        OutputBuffer: &filebuffer.Buffer{
            Buff: new(bytes.Buffer),
        },
    }

    tests := []struct {
        name         string
        objectID     uint32
        object       []byte
        expectedText string
        wantErr      bool
    }{
        {
            name:         "simple object",
            objectID:     1,
            object:       []byte("test content"),
            expectedText: "1 0 obj\ntest content\nendobj\n",
            wantErr:      false,
        },
        {
            name:         "empty object",
            objectID:     2,
            object:       []byte{},
            expectedText: "2 0 obj\n\nendobj\n",
            wantErr:      false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            context.OutputBuffer.Buff.Reset()
            err := context.writeObject(tt.objectID, tt.object)
            if (err != nil) != tt.wantErr {
                t.Errorf("writeObject() error = %v, wantErr %v", err, tt.wantErr)
                return
            }

            got := context.OutputBuffer.Buff.String()
            if !strings.Contains(got, tt.expectedText) {
                t.Errorf("writeObject() output = %q, want to contain %q", got, tt.expectedText)
            }
        })
    }
}

// TestWriteXrefTypeTable tests the writeXref function with a table xref type.
func TestWriteXrefTypeTable(t *testing.T) {
    context := &SignContext{
        OutputBuffer: &filebuffer.Buffer{
            Buff: new(bytes.Buffer),
        },
        newXrefEntries: []xrefEntry{
            {ID: 1, Offset: 100, Generation: 0, Free: false},
            {ID: 2, Offset: 200, Generation: 0, Free: false},
        },
        lastXrefID: 2,
    }

    context.PDFReader = &pdf.Reader{
        XrefInformation: pdf.ReaderXrefInformation{
            Type: "table",
        },
    }

    err := context.writeXref()
    if err != nil {
        t.Errorf("writeXref() error = %v", err)
        return
    }

    got := context.OutputBuffer.Buff.String()
    expect := "\nxref\n3 2\n0000000100 00000 n\r\n0000000200 00000 n\r\n"
    if got != expect {
        t.Errorf("writeXref() output = %q, want %q", got, expect)
    }
}

// TestWriteXrefTypeStream tests the writeXref function with a stream xref type.
func TestWriteXrefTypeStream(t *testing.T) {
    context := &SignContext{
        OutputBuffer: &filebuffer.Buffer{
            Buff: new(bytes.Buffer),
        },
        newXrefEntries: []xrefEntry{
            {ID: 1, Offset: 100, Generation: 0, Free: false},
            {ID: 2, Offset: 200, Generation: 0, Free: false},
        },
        lastXrefID: 2,
    }

    context.PDFReader = &pdf.Reader{
        XrefInformation: pdf.ReaderXrefInformation{
            Type: "stream",
        },
    }

    err := context.writeXref()
    if err != nil {
        t.Errorf("writeXref() error = %v", err)
        return
    }

    got := context.OutputBuffer.Buff.String()
    expect := "\n\n5 0 obj\n<< /Type /XRef\n /Length 22\n /Filter /FlateDecode\n /W [ 1 4 1 ]\n /Prev 0\n /Size 3\n /Index [ 3 2 ]\n /Root 0 0 R\n>>\nstream\nx\x9cbd``Ha\x00\x91'\x18\x00\x01\x00\x00\xff\xff\x04\xce\x01/\nendstream\nendobj\n"
    if got != expect {
        t.Errorf("writeXref() output = %q, want %q", got, expect)
    }
}