diff --git a/.gen/go/sqlblobs/sqlblobs.go b/.gen/go/sqlblobs/sqlblobs.go
index d00a9593d86..1e7a231421e 100644
--- a/.gen/go/sqlblobs/sqlblobs.go
+++ b/.gen/go/sqlblobs/sqlblobs.go
@@ -12584,18 +12584,13 @@ func (v *TimerInfo) IsSetTaskID() bool {
 	return v != nil && v.TaskID != nil
 }
 
-type TimerTaskInfo struct {
-	DomainID        []byte  `json:"domainID,omitempty"`
-	WorkflowID      *string `json:"workflowID,omitempty"`
-	RunID           []byte  `json:"runID,omitempty"`
-	TaskType        *int16  `json:"taskType,omitempty"`
-	TimeoutType     *int16  `json:"timeoutType,omitempty"`
-	Version         *int64  `json:"version,omitempty"`
-	ScheduleAttempt *int64  `json:"scheduleAttempt,omitempty"`
-	EventID         *int64  `json:"eventID,omitempty"`
+type TimerReference struct {
+	TaskID              *int64 `json:"taskID,omitempty"`
+	VisibilityTimestamp *int64 `json:"visibilityTimestamp,omitempty"`
+	TimeoutType         *int16 `json:"TimeoutType,omitempty"`
 }
 
-// ToWire translates a TimerTaskInfo struct into a Thrift-level intermediate
+// ToWire translates a TimerReference struct into a Thrift-level intermediate
 // representation. This intermediate representation may be serialized
 // into bytes using a ThriftRW protocol implementation.
 //
@@ -12610,44 +12605,28 @@ type TimerTaskInfo struct {
 //   if err := binaryProtocol.Encode(x, writer); err != nil {
 //     return err
 //   }
-func (v *TimerTaskInfo) ToWire() (wire.Value, error) {
+func (v *TimerReference) ToWire() (wire.Value, error) {
 	var (
-		fields [8]wire.Field
+		fields [3]wire.Field
 		i      int = 0
 		w      wire.Value
 		err    error
 	)
 
-	if v.DomainID != nil {
-		w, err = wire.NewValueBinary(v.DomainID), error(nil)
+	if v.TaskID != nil {
+		w, err = wire.NewValueI64(*(v.TaskID)), error(nil)
 		if err != nil {
 			return w, err
 		}
 		fields[i] = wire.Field{ID: 10, Value: w}
 		i++
 	}
-	if v.WorkflowID != nil {
-		w, err = wire.NewValueString(*(v.WorkflowID)), error(nil)
-		if err != nil {
-			return w, err
-		}
-		fields[i] = wire.Field{ID: 12, Value: w}
-		i++
-	}
-	if v.RunID != nil {
-		w, err = wire.NewValueBinary(v.RunID), error(nil)
-		if err != nil {
-			return w, err
-		}
-		fields[i] = wire.Field{ID: 14, Value: w}
-		i++
-	}
-	if v.TaskType != nil {
-		w, err = wire.NewValueI16(*(v.TaskType)), error(nil)
+	if v.VisibilityTimestamp != nil {
+		w, err = wire.NewValueI64(*(v.VisibilityTimestamp)), error(nil)
 		if err != nil {
 			return w, err
 		}
-		fields[i] = wire.Field{ID: 16, Value: w}
+		fields[i] = wire.Field{ID: 11, Value: w}
 		i++
 	}
 	if v.TimeoutType != nil {
@@ -12655,42 +12634,18 @@ func (v *TimerTaskInfo) ToWire() (wire.Value, error) {
 		if err != nil {
 			return w, err
 		}
-		fields[i] = wire.Field{ID: 18, Value: w}
-		i++
-	}
-	if v.Version != nil {
-		w, err = wire.NewValueI64(*(v.Version)), error(nil)
-		if err != nil {
-			return w, err
-		}
-		fields[i] = wire.Field{ID: 20, Value: w}
-		i++
-	}
-	if v.ScheduleAttempt != nil {
-		w, err = wire.NewValueI64(*(v.ScheduleAttempt)), error(nil)
-		if err != nil {
-			return w, err
-		}
-		fields[i] = wire.Field{ID: 22, Value: w}
-		i++
-	}
-	if v.EventID != nil {
-		w, err = wire.NewValueI64(*(v.EventID)), error(nil)
-		if err != nil {
-			return w, err
-		}
-		fields[i] = wire.Field{ID: 24, Value: w}
+		fields[i] = wire.Field{ID: 13, Value: w}
 		i++
 	}
 
 	return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil
 }
 
-// FromWire deserializes a TimerTaskInfo struct from its Thrift-level
+// FromWire deserializes a TimerReference struct from its Thrift-level
 // representation. The Thrift-level representation may be obtained
 // from a ThriftRW protocol implementation.
// -// An error is returned if we were unable to build a TimerTaskInfo struct +// An error is returned if we were unable to build a TimerReference struct // from the provided intermediate representation. // // x, err := binaryProtocol.Decode(reader, wire.TStruct) @@ -12698,87 +12653,41 @@ func (v *TimerTaskInfo) ToWire() (wire.Value, error) { // return nil, err // } // -// var v TimerTaskInfo +// var v TimerReference // if err := v.FromWire(x); err != nil { // return nil, err // } // return &v, nil -func (v *TimerTaskInfo) FromWire(w wire.Value) error { +func (v *TimerReference) FromWire(w wire.Value) error { var err error for _, field := range w.GetStruct().Fields { switch field.ID { case 10: - if field.Value.Type() == wire.TBinary { - v.DomainID, err = field.Value.GetBinary(), error(nil) - if err != nil { - return err - } - - } - case 12: - if field.Value.Type() == wire.TBinary { - var x string - x, err = field.Value.GetString(), error(nil) - v.WorkflowID = &x - if err != nil { - return err - } - - } - case 14: - if field.Value.Type() == wire.TBinary { - v.RunID, err = field.Value.GetBinary(), error(nil) - if err != nil { - return err - } - - } - case 16: - if field.Value.Type() == wire.TI16 { - var x int16 - x, err = field.Value.GetI16(), error(nil) - v.TaskType = &x - if err != nil { - return err - } - - } - case 18: - if field.Value.Type() == wire.TI16 { - var x int16 - x, err = field.Value.GetI16(), error(nil) - v.TimeoutType = &x - if err != nil { - return err - } - - } - case 20: if field.Value.Type() == wire.TI64 { var x int64 x, err = field.Value.GetI64(), error(nil) - v.Version = &x + v.TaskID = &x if err != nil { return err } } - case 22: + case 11: if field.Value.Type() == wire.TI64 { var x int64 x, err = field.Value.GetI64(), error(nil) - v.ScheduleAttempt = &x + v.VisibilityTimestamp = &x if err != nil { return err } } - case 24: - if field.Value.Type() == wire.TI64 { - var x int64 - x, err = field.Value.GetI64(), error(nil) - v.EventID = &x + case 13: + if field.Value.Type() == wire.TI16 { + var x int16 + x, err = field.Value.GetI16(), error(nil) + v.TimeoutType = &x if err != nil { return err } @@ -12790,44 +12699,20 @@ func (v *TimerTaskInfo) FromWire(w wire.Value) error { return nil } -// Encode serializes a TimerTaskInfo struct directly into bytes, without going +// Encode serializes a TimerReference struct directly into bytes, without going // through an intermediary type. // -// An error is returned if a TimerTaskInfo struct could not be encoded. -func (v *TimerTaskInfo) Encode(sw stream.Writer) error { +// An error is returned if a TimerReference struct could not be encoded. 
+func (v *TimerReference) Encode(sw stream.Writer) error { if err := sw.WriteStructBegin(); err != nil { return err } - if v.DomainID != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 10, Type: wire.TBinary}); err != nil { - return err - } - if err := sw.WriteBinary(v.DomainID); err != nil { - return err - } - if err := sw.WriteFieldEnd(); err != nil { - return err - } - } - - if v.WorkflowID != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 12, Type: wire.TBinary}); err != nil { - return err - } - if err := sw.WriteString(*(v.WorkflowID)); err != nil { - return err - } - if err := sw.WriteFieldEnd(); err != nil { - return err - } - } - - if v.RunID != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 14, Type: wire.TBinary}); err != nil { + if v.TaskID != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 10, Type: wire.TI64}); err != nil { return err } - if err := sw.WriteBinary(v.RunID); err != nil { + if err := sw.WriteInt64(*(v.TaskID)); err != nil { return err } if err := sw.WriteFieldEnd(); err != nil { @@ -12835,11 +12720,11 @@ func (v *TimerTaskInfo) Encode(sw stream.Writer) error { } } - if v.TaskType != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 16, Type: wire.TI16}); err != nil { + if v.VisibilityTimestamp != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 11, Type: wire.TI64}); err != nil { return err } - if err := sw.WriteInt16(*(v.TaskType)); err != nil { + if err := sw.WriteInt64(*(v.VisibilityTimestamp)); err != nil { return err } if err := sw.WriteFieldEnd(); err != nil { @@ -12848,7 +12733,7 @@ func (v *TimerTaskInfo) Encode(sw stream.Writer) error { } if v.TimeoutType != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 18, Type: wire.TI16}); err != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 13, Type: wire.TI16}); err != nil { return err } if err := sw.WriteInt16(*(v.TimeoutType)); err != nil { @@ -12859,51 +12744,15 @@ func (v *TimerTaskInfo) Encode(sw stream.Writer) error { } } - if v.Version != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 20, Type: wire.TI64}); err != nil { - return err - } - if err := sw.WriteInt64(*(v.Version)); err != nil { - return err - } - if err := sw.WriteFieldEnd(); err != nil { - return err - } - } - - if v.ScheduleAttempt != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 22, Type: wire.TI64}); err != nil { - return err - } - if err := sw.WriteInt64(*(v.ScheduleAttempt)); err != nil { - return err - } - if err := sw.WriteFieldEnd(); err != nil { - return err - } - } - - if v.EventID != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 24, Type: wire.TI64}); err != nil { - return err - } - if err := sw.WriteInt64(*(v.EventID)); err != nil { - return err - } - if err := sw.WriteFieldEnd(); err != nil { - return err - } - } - return sw.WriteStructEnd() } -// Decode deserializes a TimerTaskInfo struct directly from its Thrift-level +// Decode deserializes a TimerReference struct directly from its Thrift-level // representation, without going through an intemediary type. // -// An error is returned if a TimerTaskInfo struct could not be generated from the wire +// An error is returned if a TimerReference struct could not be generated from the wire // representation. 
-func (v *TimerTaskInfo) Decode(sr stream.Reader) error { +func (v *TimerReference) Decode(sr stream.Reader) error { if err := sr.ReadStructBegin(); err != nil { return err @@ -12916,62 +12765,26 @@ func (v *TimerTaskInfo) Decode(sr stream.Reader) error { for ok { switch { - case fh.ID == 10 && fh.Type == wire.TBinary: - v.DomainID, err = sr.ReadBinary() - if err != nil { - return err - } - - case fh.ID == 12 && fh.Type == wire.TBinary: - var x string - x, err = sr.ReadString() - v.WorkflowID = &x - if err != nil { - return err - } - - case fh.ID == 14 && fh.Type == wire.TBinary: - v.RunID, err = sr.ReadBinary() - if err != nil { - return err - } - - case fh.ID == 16 && fh.Type == wire.TI16: - var x int16 - x, err = sr.ReadInt16() - v.TaskType = &x - if err != nil { - return err - } - - case fh.ID == 18 && fh.Type == wire.TI16: - var x int16 - x, err = sr.ReadInt16() - v.TimeoutType = &x - if err != nil { - return err - } - - case fh.ID == 20 && fh.Type == wire.TI64: + case fh.ID == 10 && fh.Type == wire.TI64: var x int64 x, err = sr.ReadInt64() - v.Version = &x + v.TaskID = &x if err != nil { return err } - case fh.ID == 22 && fh.Type == wire.TI64: + case fh.ID == 11 && fh.Type == wire.TI64: var x int64 x, err = sr.ReadInt64() - v.ScheduleAttempt = &x + v.VisibilityTimestamp = &x if err != nil { return err } - case fh.ID == 24 && fh.Type == wire.TI64: - var x int64 - x, err = sr.ReadInt64() - v.EventID = &x + case fh.ID == 13 && fh.Type == wire.TI16: + var x int16 + x, err = sr.ReadInt16() + v.TimeoutType = &x if err != nil { return err } @@ -12998,185 +12811,105 @@ func (v *TimerTaskInfo) Decode(sr stream.Reader) error { return nil } -// String returns a readable string representation of a TimerTaskInfo +// String returns a readable string representation of a TimerReference // struct. -func (v *TimerTaskInfo) String() string { +func (v *TimerReference) String() string { if v == nil { return "" } - var fields [8]string + var fields [3]string i := 0 - if v.DomainID != nil { - fields[i] = fmt.Sprintf("DomainID: %v", v.DomainID) - i++ - } - if v.WorkflowID != nil { - fields[i] = fmt.Sprintf("WorkflowID: %v", *(v.WorkflowID)) - i++ - } - if v.RunID != nil { - fields[i] = fmt.Sprintf("RunID: %v", v.RunID) + if v.TaskID != nil { + fields[i] = fmt.Sprintf("TaskID: %v", *(v.TaskID)) i++ } - if v.TaskType != nil { - fields[i] = fmt.Sprintf("TaskType: %v", *(v.TaskType)) + if v.VisibilityTimestamp != nil { + fields[i] = fmt.Sprintf("VisibilityTimestamp: %v", *(v.VisibilityTimestamp)) i++ } if v.TimeoutType != nil { fields[i] = fmt.Sprintf("TimeoutType: %v", *(v.TimeoutType)) i++ } - if v.Version != nil { - fields[i] = fmt.Sprintf("Version: %v", *(v.Version)) - i++ - } - if v.ScheduleAttempt != nil { - fields[i] = fmt.Sprintf("ScheduleAttempt: %v", *(v.ScheduleAttempt)) - i++ - } - if v.EventID != nil { - fields[i] = fmt.Sprintf("EventID: %v", *(v.EventID)) - i++ - } - return fmt.Sprintf("TimerTaskInfo{%v}", strings.Join(fields[:i], ", ")) + return fmt.Sprintf("TimerReference{%v}", strings.Join(fields[:i], ", ")) } -// Equals returns true if all the fields of this TimerTaskInfo match the -// provided TimerTaskInfo. +// Equals returns true if all the fields of this TimerReference match the +// provided TimerReference. // // This function performs a deep comparison. 
-func (v *TimerTaskInfo) Equals(rhs *TimerTaskInfo) bool { +func (v *TimerReference) Equals(rhs *TimerReference) bool { if v == nil { return rhs == nil } else if rhs == nil { return false } - if !((v.DomainID == nil && rhs.DomainID == nil) || (v.DomainID != nil && rhs.DomainID != nil && bytes.Equal(v.DomainID, rhs.DomainID))) { - return false - } - if !_String_EqualsPtr(v.WorkflowID, rhs.WorkflowID) { - return false - } - if !((v.RunID == nil && rhs.RunID == nil) || (v.RunID != nil && rhs.RunID != nil && bytes.Equal(v.RunID, rhs.RunID))) { + if !_I64_EqualsPtr(v.TaskID, rhs.TaskID) { return false } - if !_I16_EqualsPtr(v.TaskType, rhs.TaskType) { + if !_I64_EqualsPtr(v.VisibilityTimestamp, rhs.VisibilityTimestamp) { return false } if !_I16_EqualsPtr(v.TimeoutType, rhs.TimeoutType) { return false } - if !_I64_EqualsPtr(v.Version, rhs.Version) { - return false - } - if !_I64_EqualsPtr(v.ScheduleAttempt, rhs.ScheduleAttempt) { - return false - } - if !_I64_EqualsPtr(v.EventID, rhs.EventID) { - return false - } return true } // MarshalLogObject implements zapcore.ObjectMarshaler, enabling -// fast logging of TimerTaskInfo. -func (v *TimerTaskInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { +// fast logging of TimerReference. +func (v *TimerReference) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { if v == nil { return nil } - if v.DomainID != nil { - enc.AddString("domainID", base64.StdEncoding.EncodeToString(v.DomainID)) - } - if v.WorkflowID != nil { - enc.AddString("workflowID", *v.WorkflowID) - } - if v.RunID != nil { - enc.AddString("runID", base64.StdEncoding.EncodeToString(v.RunID)) + if v.TaskID != nil { + enc.AddInt64("taskID", *v.TaskID) } - if v.TaskType != nil { - enc.AddInt16("taskType", *v.TaskType) + if v.VisibilityTimestamp != nil { + enc.AddInt64("visibilityTimestamp", *v.VisibilityTimestamp) } if v.TimeoutType != nil { - enc.AddInt16("timeoutType", *v.TimeoutType) - } - if v.Version != nil { - enc.AddInt64("version", *v.Version) - } - if v.ScheduleAttempt != nil { - enc.AddInt64("scheduleAttempt", *v.ScheduleAttempt) - } - if v.EventID != nil { - enc.AddInt64("eventID", *v.EventID) + enc.AddInt16("TimeoutType", *v.TimeoutType) } return err } -// GetDomainID returns the value of DomainID if it is set or its -// zero value if it is unset. -func (v *TimerTaskInfo) GetDomainID() (o []byte) { - if v != nil && v.DomainID != nil { - return v.DomainID - } - - return -} - -// IsSetDomainID returns true if DomainID is not nil. -func (v *TimerTaskInfo) IsSetDomainID() bool { - return v != nil && v.DomainID != nil -} - -// GetWorkflowID returns the value of WorkflowID if it is set or its -// zero value if it is unset. -func (v *TimerTaskInfo) GetWorkflowID() (o string) { - if v != nil && v.WorkflowID != nil { - return *v.WorkflowID - } - - return -} - -// IsSetWorkflowID returns true if WorkflowID is not nil. -func (v *TimerTaskInfo) IsSetWorkflowID() bool { - return v != nil && v.WorkflowID != nil -} - -// GetRunID returns the value of RunID if it is set or its +// GetTaskID returns the value of TaskID if it is set or its // zero value if it is unset. -func (v *TimerTaskInfo) GetRunID() (o []byte) { - if v != nil && v.RunID != nil { - return v.RunID +func (v *TimerReference) GetTaskID() (o int64) { + if v != nil && v.TaskID != nil { + return *v.TaskID } return } -// IsSetRunID returns true if RunID is not nil. -func (v *TimerTaskInfo) IsSetRunID() bool { - return v != nil && v.RunID != nil +// IsSetTaskID returns true if TaskID is not nil. 
+func (v *TimerReference) IsSetTaskID() bool { + return v != nil && v.TaskID != nil } -// GetTaskType returns the value of TaskType if it is set or its +// GetVisibilityTimestamp returns the value of VisibilityTimestamp if it is set or its // zero value if it is unset. -func (v *TimerTaskInfo) GetTaskType() (o int16) { - if v != nil && v.TaskType != nil { - return *v.TaskType +func (v *TimerReference) GetVisibilityTimestamp() (o int64) { + if v != nil && v.VisibilityTimestamp != nil { + return *v.VisibilityTimestamp } return } -// IsSetTaskType returns true if TaskType is not nil. -func (v *TimerTaskInfo) IsSetTaskType() bool { - return v != nil && v.TaskType != nil +// IsSetVisibilityTimestamp returns true if VisibilityTimestamp is not nil. +func (v *TimerReference) IsSetVisibilityTimestamp() bool { + return v != nil && v.VisibilityTimestamp != nil } // GetTimeoutType returns the value of TimeoutType if it is set or its // zero value if it is unset. -func (v *TimerTaskInfo) GetTimeoutType() (o int16) { +func (v *TimerReference) GetTimeoutType() (o int16) { if v != nil && v.TimeoutType != nil { return *v.TimeoutType } @@ -13185,101 +12918,22 @@ func (v *TimerTaskInfo) GetTimeoutType() (o int16) { } // IsSetTimeoutType returns true if TimeoutType is not nil. -func (v *TimerTaskInfo) IsSetTimeoutType() bool { +func (v *TimerReference) IsSetTimeoutType() bool { return v != nil && v.TimeoutType != nil } -// GetVersion returns the value of Version if it is set or its -// zero value if it is unset. -func (v *TimerTaskInfo) GetVersion() (o int64) { - if v != nil && v.Version != nil { - return *v.Version - } - - return -} - -// IsSetVersion returns true if Version is not nil. -func (v *TimerTaskInfo) IsSetVersion() bool { - return v != nil && v.Version != nil -} - -// GetScheduleAttempt returns the value of ScheduleAttempt if it is set or its -// zero value if it is unset. -func (v *TimerTaskInfo) GetScheduleAttempt() (o int64) { - if v != nil && v.ScheduleAttempt != nil { - return *v.ScheduleAttempt - } - - return -} - -// IsSetScheduleAttempt returns true if ScheduleAttempt is not nil. -func (v *TimerTaskInfo) IsSetScheduleAttempt() bool { - return v != nil && v.ScheduleAttempt != nil -} - -// GetEventID returns the value of EventID if it is set or its -// zero value if it is unset. -func (v *TimerTaskInfo) GetEventID() (o int64) { - if v != nil && v.EventID != nil { - return *v.EventID - } - - return -} - -// IsSetEventID returns true if EventID is not nil. 
-func (v *TimerTaskInfo) IsSetEventID() bool { - return v != nil && v.EventID != nil -} - -type TransferTaskInfo struct { - DomainID []byte `json:"domainID,omitempty"` - WorkflowID *string `json:"workflowID,omitempty"` - RunID []byte `json:"runID,omitempty"` - TaskType *int16 `json:"taskType,omitempty"` - TargetDomainID []byte `json:"targetDomainID,omitempty"` - TargetWorkflowID *string `json:"targetWorkflowID,omitempty"` - TargetRunID []byte `json:"targetRunID,omitempty"` - TaskList *string `json:"taskList,omitempty"` - TargetChildWorkflowOnly *bool `json:"targetChildWorkflowOnly,omitempty"` - ScheduleID *int64 `json:"scheduleID,omitempty"` - Version *int64 `json:"version,omitempty"` - VisibilityTimestampNanos *int64 `json:"visibilityTimestampNanos,omitempty"` - TargetDomainIDs [][]byte `json:"targetDomainIDs,omitempty"` -} - -type _Set_Binary_sliceType_ValueList [][]byte - -func (v _Set_Binary_sliceType_ValueList) ForEach(f func(wire.Value) error) error { - for _, x := range v { - if x == nil { - return fmt.Errorf("invalid set '[]byte': contains nil value") - } - w, err := wire.NewValueBinary(x), error(nil) - if err != nil { - return err - } - - if err := f(w); err != nil { - return err - } - } - return nil -} - -func (v _Set_Binary_sliceType_ValueList) Size() int { - return len(v) -} - -func (_Set_Binary_sliceType_ValueList) ValueType() wire.Type { - return wire.TBinary +type TimerTaskInfo struct { + DomainID []byte `json:"domainID,omitempty"` + WorkflowID *string `json:"workflowID,omitempty"` + RunID []byte `json:"runID,omitempty"` + TaskType *int16 `json:"taskType,omitempty"` + TimeoutType *int16 `json:"timeoutType,omitempty"` + Version *int64 `json:"version,omitempty"` + ScheduleAttempt *int64 `json:"scheduleAttempt,omitempty"` + EventID *int64 `json:"eventID,omitempty"` } -func (_Set_Binary_sliceType_ValueList) Close() {} - -// ToWire translates a TransferTaskInfo struct into a Thrift-level intermediate +// ToWire translates a TimerTaskInfo struct into a Thrift-level intermediate // representation. This intermediate representation may be serialized // into bytes using a ThriftRW protocol implementation. 
// @@ -13294,9 +12948,9 @@ func (_Set_Binary_sliceType_ValueList) Close() {} // if err := binaryProtocol.Encode(x, writer); err != nil { // return err // } -func (v *TransferTaskInfo) ToWire() (wire.Value, error) { +func (v *TimerTaskInfo) ToWire() (wire.Value, error) { var ( - fields [13]wire.Field + fields [8]wire.Field i int = 0 w wire.Value err error @@ -13334,106 +12988,47 @@ func (v *TransferTaskInfo) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 16, Value: w} i++ } - if v.TargetDomainID != nil { - w, err = wire.NewValueBinary(v.TargetDomainID), error(nil) + if v.TimeoutType != nil { + w, err = wire.NewValueI16(*(v.TimeoutType)), error(nil) if err != nil { return w, err } fields[i] = wire.Field{ID: 18, Value: w} i++ } - if v.TargetWorkflowID != nil { - w, err = wire.NewValueString(*(v.TargetWorkflowID)), error(nil) + if v.Version != nil { + w, err = wire.NewValueI64(*(v.Version)), error(nil) if err != nil { return w, err } fields[i] = wire.Field{ID: 20, Value: w} i++ } - if v.TargetRunID != nil { - w, err = wire.NewValueBinary(v.TargetRunID), error(nil) + if v.ScheduleAttempt != nil { + w, err = wire.NewValueI64(*(v.ScheduleAttempt)), error(nil) if err != nil { return w, err } fields[i] = wire.Field{ID: 22, Value: w} i++ } - if v.TaskList != nil { - w, err = wire.NewValueString(*(v.TaskList)), error(nil) + if v.EventID != nil { + w, err = wire.NewValueI64(*(v.EventID)), error(nil) if err != nil { return w, err } fields[i] = wire.Field{ID: 24, Value: w} i++ } - if v.TargetChildWorkflowOnly != nil { - w, err = wire.NewValueBool(*(v.TargetChildWorkflowOnly)), error(nil) - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 26, Value: w} - i++ - } - if v.ScheduleID != nil { - w, err = wire.NewValueI64(*(v.ScheduleID)), error(nil) - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 28, Value: w} - i++ - } - if v.Version != nil { - w, err = wire.NewValueI64(*(v.Version)), error(nil) - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 30, Value: w} - i++ - } - if v.VisibilityTimestampNanos != nil { - w, err = wire.NewValueI64(*(v.VisibilityTimestampNanos)), error(nil) - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 32, Value: w} - i++ - } - if v.TargetDomainIDs != nil { - w, err = wire.NewValueSet(_Set_Binary_sliceType_ValueList(v.TargetDomainIDs)), error(nil) - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 34, Value: w} - i++ - } return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } -func _Set_Binary_sliceType_Read(s wire.ValueList) ([][]byte, error) { - if s.ValueType() != wire.TBinary { - return nil, nil - } - - o := make([][]byte, 0, s.Size()) - err := s.ForEach(func(x wire.Value) error { - i, err := x.GetBinary(), error(nil) - if err != nil { - return err - } - - o = append(o, i) - return nil - }) - s.Close() - return o, err -} - -// FromWire deserializes a TransferTaskInfo struct from its Thrift-level +// FromWire deserializes a TimerTaskInfo struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. // -// An error is returned if we were unable to build a TransferTaskInfo struct +// An error is returned if we were unable to build a TimerTaskInfo struct // from the provided intermediate representation. 
// // x, err := binaryProtocol.Decode(reader, wire.TStruct) @@ -13441,12 +13036,12 @@ func _Set_Binary_sliceType_Read(s wire.ValueList) ([][]byte, error) { // return nil, err // } // -// var v TransferTaskInfo +// var v TimerTaskInfo // if err := v.FromWire(x); err != nil { // return nil, err // } // return &v, nil -func (v *TransferTaskInfo) FromWire(w wire.Value) error { +func (v *TimerTaskInfo) FromWire(w wire.Value) error { var err error for _, field := range w.GetStruct().Fields { @@ -13488,84 +13083,40 @@ func (v *TransferTaskInfo) FromWire(w wire.Value) error { } case 18: - if field.Value.Type() == wire.TBinary { - v.TargetDomainID, err = field.Value.GetBinary(), error(nil) + if field.Value.Type() == wire.TI16 { + var x int16 + x, err = field.Value.GetI16(), error(nil) + v.TimeoutType = &x if err != nil { return err } } case 20: - if field.Value.Type() == wire.TBinary { - var x string - x, err = field.Value.GetString(), error(nil) - v.TargetWorkflowID = &x - if err != nil { - return err - } - - } - case 22: - if field.Value.Type() == wire.TBinary { - v.TargetRunID, err = field.Value.GetBinary(), error(nil) - if err != nil { - return err - } - - } - case 24: - if field.Value.Type() == wire.TBinary { - var x string - x, err = field.Value.GetString(), error(nil) - v.TaskList = &x - if err != nil { - return err - } - - } - case 26: - if field.Value.Type() == wire.TBool { - var x bool - x, err = field.Value.GetBool(), error(nil) - v.TargetChildWorkflowOnly = &x - if err != nil { - return err - } - - } - case 28: if field.Value.Type() == wire.TI64 { var x int64 x, err = field.Value.GetI64(), error(nil) - v.ScheduleID = &x + v.Version = &x if err != nil { return err } } - case 30: + case 22: if field.Value.Type() == wire.TI64 { var x int64 x, err = field.Value.GetI64(), error(nil) - v.Version = &x + v.ScheduleAttempt = &x if err != nil { return err } } - case 32: + case 24: if field.Value.Type() == wire.TI64 { var x int64 x, err = field.Value.GetI64(), error(nil) - v.VisibilityTimestampNanos = &x - if err != nil { - return err - } - - } - case 34: - if field.Value.Type() == wire.TSet { - v.TargetDomainIDs, err = _Set_Binary_sliceType_Read(field.Value.GetSet()) + v.EventID = &x if err != nil { return err } @@ -13577,34 +13128,11 @@ func (v *TransferTaskInfo) FromWire(w wire.Value) error { return nil } -func _Set_Binary_sliceType_Encode(val [][]byte, sw stream.Writer) error { - - sh := stream.SetHeader{ - Type: wire.TBinary, - Length: len(val), - } - - if err := sw.WriteSetBegin(sh); err != nil { - return err - } - - for _, v := range val { - if v == nil { - return fmt.Errorf("invalid set '[]byte': contains nil value") - } - - if err := sw.WriteBinary(v); err != nil { - return err - } - } - return sw.WriteSetEnd() -} - -// Encode serializes a TransferTaskInfo struct directly into bytes, without going +// Encode serializes a TimerTaskInfo struct directly into bytes, without going // through an intermediary type. // -// An error is returned if a TransferTaskInfo struct could not be encoded. -func (v *TransferTaskInfo) Encode(sw stream.Writer) error { +// An error is returned if a TimerTaskInfo struct could not be encoded. 
+func (v *TimerTaskInfo) Encode(sw stream.Writer) error { if err := sw.WriteStructBegin(); err != nil { return err } @@ -13657,71 +13185,11 @@ func (v *TransferTaskInfo) Encode(sw stream.Writer) error { } } - if v.TargetDomainID != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 18, Type: wire.TBinary}); err != nil { - return err - } - if err := sw.WriteBinary(v.TargetDomainID); err != nil { - return err - } - if err := sw.WriteFieldEnd(); err != nil { - return err - } - } - - if v.TargetWorkflowID != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 20, Type: wire.TBinary}); err != nil { - return err - } - if err := sw.WriteString(*(v.TargetWorkflowID)); err != nil { - return err - } - if err := sw.WriteFieldEnd(); err != nil { - return err - } - } - - if v.TargetRunID != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 22, Type: wire.TBinary}); err != nil { - return err - } - if err := sw.WriteBinary(v.TargetRunID); err != nil { - return err - } - if err := sw.WriteFieldEnd(); err != nil { - return err - } - } - - if v.TaskList != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 24, Type: wire.TBinary}); err != nil { - return err - } - if err := sw.WriteString(*(v.TaskList)); err != nil { - return err - } - if err := sw.WriteFieldEnd(); err != nil { - return err - } - } - - if v.TargetChildWorkflowOnly != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 26, Type: wire.TBool}); err != nil { - return err - } - if err := sw.WriteBool(*(v.TargetChildWorkflowOnly)); err != nil { - return err - } - if err := sw.WriteFieldEnd(); err != nil { - return err - } - } - - if v.ScheduleID != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 28, Type: wire.TI64}); err != nil { + if v.TimeoutType != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 18, Type: wire.TI16}); err != nil { return err } - if err := sw.WriteInt64(*(v.ScheduleID)); err != nil { + if err := sw.WriteInt16(*(v.TimeoutType)); err != nil { return err } if err := sw.WriteFieldEnd(); err != nil { @@ -13730,7 +13198,7 @@ func (v *TransferTaskInfo) Encode(sw stream.Writer) error { } if v.Version != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 30, Type: wire.TI64}); err != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 20, Type: wire.TI64}); err != nil { return err } if err := sw.WriteInt64(*(v.Version)); err != nil { @@ -13741,11 +13209,11 @@ func (v *TransferTaskInfo) Encode(sw stream.Writer) error { } } - if v.VisibilityTimestampNanos != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 32, Type: wire.TI64}); err != nil { + if v.ScheduleAttempt != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 22, Type: wire.TI64}); err != nil { return err } - if err := sw.WriteInt64(*(v.VisibilityTimestampNanos)); err != nil { + if err := sw.WriteInt64(*(v.ScheduleAttempt)); err != nil { return err } if err := sw.WriteFieldEnd(); err != nil { @@ -13753,11 +13221,11 @@ func (v *TransferTaskInfo) Encode(sw stream.Writer) error { } } - if v.TargetDomainIDs != nil { - if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 34, Type: wire.TSet}); err != nil { + if v.EventID != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 24, Type: wire.TI64}); err != nil { return err } - if err := _Set_Binary_sliceType_Encode(v.TargetDomainIDs, sw); err != nil { + if err := sw.WriteInt64(*(v.EventID)); err != nil { return err } if err := sw.WriteFieldEnd(); err != nil { @@ -13768,59 +13236,28 @@ func (v *TransferTaskInfo) Encode(sw 
stream.Writer) error { return sw.WriteStructEnd() } -func _Set_Binary_sliceType_Decode(sr stream.Reader) ([][]byte, error) { - sh, err := sr.ReadSetBegin() +// Decode deserializes a TimerTaskInfo struct directly from its Thrift-level +// representation, without going through an intemediary type. +// +// An error is returned if a TimerTaskInfo struct could not be generated from the wire +// representation. +func (v *TimerTaskInfo) Decode(sr stream.Reader) error { + + if err := sr.ReadStructBegin(); err != nil { + return err + } + + fh, ok, err := sr.ReadFieldBegin() if err != nil { - return nil, err + return err } - if sh.Type != wire.TBinary { - for i := 0; i < sh.Length; i++ { - if err := sr.Skip(sh.Type); err != nil { - return nil, err - } - } - return nil, sr.ReadSetEnd() - } - - o := make([][]byte, 0, sh.Length) - for i := 0; i < sh.Length; i++ { - v, err := sr.ReadBinary() - if err != nil { - return nil, err - } - - o = append(o, v) - } - - if err = sr.ReadSetEnd(); err != nil { - return nil, err - } - return o, err -} - -// Decode deserializes a TransferTaskInfo struct directly from its Thrift-level -// representation, without going through an intemediary type. -// -// An error is returned if a TransferTaskInfo struct could not be generated from the wire -// representation. -func (v *TransferTaskInfo) Decode(sr stream.Reader) error { - - if err := sr.ReadStructBegin(); err != nil { - return err - } - - fh, ok, err := sr.ReadFieldBegin() - if err != nil { - return err - } - - for ok { - switch { - case fh.ID == 10 && fh.Type == wire.TBinary: - v.DomainID, err = sr.ReadBinary() - if err != nil { - return err + for ok { + switch { + case fh.ID == 10 && fh.Type == wire.TBinary: + v.DomainID, err = sr.ReadBinary() + if err != nil { + return err } case fh.ID == 12 && fh.Type == wire.TBinary: @@ -13845,68 +13282,34 @@ func (v *TransferTaskInfo) Decode(sr stream.Reader) error { return err } - case fh.ID == 18 && fh.Type == wire.TBinary: - v.TargetDomainID, err = sr.ReadBinary() - if err != nil { - return err - } - - case fh.ID == 20 && fh.Type == wire.TBinary: - var x string - x, err = sr.ReadString() - v.TargetWorkflowID = &x - if err != nil { - return err - } - - case fh.ID == 22 && fh.Type == wire.TBinary: - v.TargetRunID, err = sr.ReadBinary() - if err != nil { - return err - } - - case fh.ID == 24 && fh.Type == wire.TBinary: - var x string - x, err = sr.ReadString() - v.TaskList = &x - if err != nil { - return err - } - - case fh.ID == 26 && fh.Type == wire.TBool: - var x bool - x, err = sr.ReadBool() - v.TargetChildWorkflowOnly = &x + case fh.ID == 18 && fh.Type == wire.TI16: + var x int16 + x, err = sr.ReadInt16() + v.TimeoutType = &x if err != nil { return err } - case fh.ID == 28 && fh.Type == wire.TI64: + case fh.ID == 20 && fh.Type == wire.TI64: var x int64 x, err = sr.ReadInt64() - v.ScheduleID = &x + v.Version = &x if err != nil { return err } - case fh.ID == 30 && fh.Type == wire.TI64: + case fh.ID == 22 && fh.Type == wire.TI64: var x int64 x, err = sr.ReadInt64() - v.Version = &x + v.ScheduleAttempt = &x if err != nil { return err } - case fh.ID == 32 && fh.Type == wire.TI64: + case fh.ID == 24 && fh.Type == wire.TI64: var x int64 x, err = sr.ReadInt64() - v.VisibilityTimestampNanos = &x - if err != nil { - return err - } - - case fh.ID == 34 && fh.Type == wire.TSet: - v.TargetDomainIDs, err = _Set_Binary_sliceType_Decode(sr) + v.EventID = &x if err != nil { return err } @@ -13933,14 +13336,14 @@ func (v *TransferTaskInfo) Decode(sr stream.Reader) error { return nil } -// 
String returns a readable string representation of a TransferTaskInfo +// String returns a readable string representation of a TimerTaskInfo // struct. -func (v *TransferTaskInfo) String() string { +func (v *TimerTaskInfo) String() string { if v == nil { return "" } - var fields [13]string + var fields [8]string i := 0 if v.DomainID != nil { fields[i] = fmt.Sprintf("DomainID: %v", v.DomainID) @@ -13958,72 +13361,31 @@ func (v *TransferTaskInfo) String() string { fields[i] = fmt.Sprintf("TaskType: %v", *(v.TaskType)) i++ } - if v.TargetDomainID != nil { - fields[i] = fmt.Sprintf("TargetDomainID: %v", v.TargetDomainID) - i++ - } - if v.TargetWorkflowID != nil { - fields[i] = fmt.Sprintf("TargetWorkflowID: %v", *(v.TargetWorkflowID)) - i++ - } - if v.TargetRunID != nil { - fields[i] = fmt.Sprintf("TargetRunID: %v", v.TargetRunID) - i++ - } - if v.TaskList != nil { - fields[i] = fmt.Sprintf("TaskList: %v", *(v.TaskList)) - i++ - } - if v.TargetChildWorkflowOnly != nil { - fields[i] = fmt.Sprintf("TargetChildWorkflowOnly: %v", *(v.TargetChildWorkflowOnly)) - i++ - } - if v.ScheduleID != nil { - fields[i] = fmt.Sprintf("ScheduleID: %v", *(v.ScheduleID)) + if v.TimeoutType != nil { + fields[i] = fmt.Sprintf("TimeoutType: %v", *(v.TimeoutType)) i++ } if v.Version != nil { fields[i] = fmt.Sprintf("Version: %v", *(v.Version)) i++ } - if v.VisibilityTimestampNanos != nil { - fields[i] = fmt.Sprintf("VisibilityTimestampNanos: %v", *(v.VisibilityTimestampNanos)) + if v.ScheduleAttempt != nil { + fields[i] = fmt.Sprintf("ScheduleAttempt: %v", *(v.ScheduleAttempt)) i++ } - if v.TargetDomainIDs != nil { - fields[i] = fmt.Sprintf("TargetDomainIDs: %v", v.TargetDomainIDs) + if v.EventID != nil { + fields[i] = fmt.Sprintf("EventID: %v", *(v.EventID)) i++ } - return fmt.Sprintf("TransferTaskInfo{%v}", strings.Join(fields[:i], ", ")) -} - -func _Set_Binary_sliceType_Equals(lhs, rhs [][]byte) bool { - if len(lhs) != len(rhs) { - return false - } - - for _, x := range lhs { - ok := false - for _, y := range rhs { - if bytes.Equal(x, y) { - ok = true - break - } - } - if !ok { - return false - } - } - - return true + return fmt.Sprintf("TimerTaskInfo{%v}", strings.Join(fields[:i], ", ")) } -// Equals returns true if all the fields of this TransferTaskInfo match the -// provided TransferTaskInfo. +// Equals returns true if all the fields of this TimerTaskInfo match the +// provided TimerTaskInfo. // // This function performs a deep comparison. 
-func (v *TransferTaskInfo) Equals(rhs *TransferTaskInfo) bool { +func (v *TimerTaskInfo) Equals(rhs *TimerTaskInfo) bool { if v == nil { return rhs == nil } else if rhs == nil { @@ -14041,51 +13403,25 @@ func (v *TransferTaskInfo) Equals(rhs *TransferTaskInfo) bool { if !_I16_EqualsPtr(v.TaskType, rhs.TaskType) { return false } - if !((v.TargetDomainID == nil && rhs.TargetDomainID == nil) || (v.TargetDomainID != nil && rhs.TargetDomainID != nil && bytes.Equal(v.TargetDomainID, rhs.TargetDomainID))) { - return false - } - if !_String_EqualsPtr(v.TargetWorkflowID, rhs.TargetWorkflowID) { - return false - } - if !((v.TargetRunID == nil && rhs.TargetRunID == nil) || (v.TargetRunID != nil && rhs.TargetRunID != nil && bytes.Equal(v.TargetRunID, rhs.TargetRunID))) { - return false - } - if !_String_EqualsPtr(v.TaskList, rhs.TaskList) { - return false - } - if !_Bool_EqualsPtr(v.TargetChildWorkflowOnly, rhs.TargetChildWorkflowOnly) { - return false - } - if !_I64_EqualsPtr(v.ScheduleID, rhs.ScheduleID) { + if !_I16_EqualsPtr(v.TimeoutType, rhs.TimeoutType) { return false } if !_I64_EqualsPtr(v.Version, rhs.Version) { return false } - if !_I64_EqualsPtr(v.VisibilityTimestampNanos, rhs.VisibilityTimestampNanos) { + if !_I64_EqualsPtr(v.ScheduleAttempt, rhs.ScheduleAttempt) { return false } - if !((v.TargetDomainIDs == nil && rhs.TargetDomainIDs == nil) || (v.TargetDomainIDs != nil && rhs.TargetDomainIDs != nil && _Set_Binary_sliceType_Equals(v.TargetDomainIDs, rhs.TargetDomainIDs))) { + if !_I64_EqualsPtr(v.EventID, rhs.EventID) { return false } return true } -type _Set_Binary_sliceType_Zapper [][]byte - -// MarshalLogArray implements zapcore.ArrayMarshaler, enabling -// fast logging of _Set_Binary_sliceType_Zapper. -func (s _Set_Binary_sliceType_Zapper) MarshalLogArray(enc zapcore.ArrayEncoder) (err error) { - for _, v := range s { - enc.AppendString(base64.StdEncoding.EncodeToString(v)) - } - return err -} - // MarshalLogObject implements zapcore.ObjectMarshaler, enabling -// fast logging of TransferTaskInfo. -func (v *TransferTaskInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { +// fast logging of TimerTaskInfo. 
+func (v *TimerTaskInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { if v == nil { return nil } @@ -14101,39 +13437,24 @@ func (v *TransferTaskInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err erro if v.TaskType != nil { enc.AddInt16("taskType", *v.TaskType) } - if v.TargetDomainID != nil { - enc.AddString("targetDomainID", base64.StdEncoding.EncodeToString(v.TargetDomainID)) - } - if v.TargetWorkflowID != nil { - enc.AddString("targetWorkflowID", *v.TargetWorkflowID) - } - if v.TargetRunID != nil { - enc.AddString("targetRunID", base64.StdEncoding.EncodeToString(v.TargetRunID)) - } - if v.TaskList != nil { - enc.AddString("taskList", *v.TaskList) - } - if v.TargetChildWorkflowOnly != nil { - enc.AddBool("targetChildWorkflowOnly", *v.TargetChildWorkflowOnly) - } - if v.ScheduleID != nil { - enc.AddInt64("scheduleID", *v.ScheduleID) + if v.TimeoutType != nil { + enc.AddInt16("timeoutType", *v.TimeoutType) } if v.Version != nil { enc.AddInt64("version", *v.Version) } - if v.VisibilityTimestampNanos != nil { - enc.AddInt64("visibilityTimestampNanos", *v.VisibilityTimestampNanos) + if v.ScheduleAttempt != nil { + enc.AddInt64("scheduleAttempt", *v.ScheduleAttempt) } - if v.TargetDomainIDs != nil { - err = multierr.Append(err, enc.AddArray("targetDomainIDs", (_Set_Binary_sliceType_Zapper)(v.TargetDomainIDs))) + if v.EventID != nil { + enc.AddInt64("eventID", *v.EventID) } return err } // GetDomainID returns the value of DomainID if it is set or its // zero value if it is unset. -func (v *TransferTaskInfo) GetDomainID() (o []byte) { +func (v *TimerTaskInfo) GetDomainID() (o []byte) { if v != nil && v.DomainID != nil { return v.DomainID } @@ -14142,13 +13463,13 @@ func (v *TransferTaskInfo) GetDomainID() (o []byte) { } // IsSetDomainID returns true if DomainID is not nil. -func (v *TransferTaskInfo) IsSetDomainID() bool { +func (v *TimerTaskInfo) IsSetDomainID() bool { return v != nil && v.DomainID != nil } // GetWorkflowID returns the value of WorkflowID if it is set or its // zero value if it is unset. -func (v *TransferTaskInfo) GetWorkflowID() (o string) { +func (v *TimerTaskInfo) GetWorkflowID() (o string) { if v != nil && v.WorkflowID != nil { return *v.WorkflowID } @@ -14157,13 +13478,13 @@ func (v *TransferTaskInfo) GetWorkflowID() (o string) { } // IsSetWorkflowID returns true if WorkflowID is not nil. -func (v *TransferTaskInfo) IsSetWorkflowID() bool { +func (v *TimerTaskInfo) IsSetWorkflowID() bool { return v != nil && v.WorkflowID != nil } // GetRunID returns the value of RunID if it is set or its // zero value if it is unset. -func (v *TransferTaskInfo) GetRunID() (o []byte) { +func (v *TimerTaskInfo) GetRunID() (o []byte) { if v != nil && v.RunID != nil { return v.RunID } @@ -14172,13 +13493,13 @@ func (v *TransferTaskInfo) GetRunID() (o []byte) { } // IsSetRunID returns true if RunID is not nil. -func (v *TransferTaskInfo) IsSetRunID() bool { +func (v *TimerTaskInfo) IsSetRunID() bool { return v != nil && v.RunID != nil } // GetTaskType returns the value of TaskType if it is set or its // zero value if it is unset. -func (v *TransferTaskInfo) GetTaskType() (o int16) { +func (v *TimerTaskInfo) GetTaskType() (o int16) { if v != nil && v.TaskType != nil { return *v.TaskType } @@ -14187,103 +13508,28 @@ func (v *TransferTaskInfo) GetTaskType() (o int16) { } // IsSetTaskType returns true if TaskType is not nil. 
-func (v *TransferTaskInfo) IsSetTaskType() bool { +func (v *TimerTaskInfo) IsSetTaskType() bool { return v != nil && v.TaskType != nil } -// GetTargetDomainID returns the value of TargetDomainID if it is set or its +// GetTimeoutType returns the value of TimeoutType if it is set or its // zero value if it is unset. -func (v *TransferTaskInfo) GetTargetDomainID() (o []byte) { - if v != nil && v.TargetDomainID != nil { - return v.TargetDomainID +func (v *TimerTaskInfo) GetTimeoutType() (o int16) { + if v != nil && v.TimeoutType != nil { + return *v.TimeoutType } return } -// IsSetTargetDomainID returns true if TargetDomainID is not nil. -func (v *TransferTaskInfo) IsSetTargetDomainID() bool { - return v != nil && v.TargetDomainID != nil +// IsSetTimeoutType returns true if TimeoutType is not nil. +func (v *TimerTaskInfo) IsSetTimeoutType() bool { + return v != nil && v.TimeoutType != nil } -// GetTargetWorkflowID returns the value of TargetWorkflowID if it is set or its +// GetVersion returns the value of Version if it is set or its // zero value if it is unset. -func (v *TransferTaskInfo) GetTargetWorkflowID() (o string) { - if v != nil && v.TargetWorkflowID != nil { - return *v.TargetWorkflowID - } - - return -} - -// IsSetTargetWorkflowID returns true if TargetWorkflowID is not nil. -func (v *TransferTaskInfo) IsSetTargetWorkflowID() bool { - return v != nil && v.TargetWorkflowID != nil -} - -// GetTargetRunID returns the value of TargetRunID if it is set or its -// zero value if it is unset. -func (v *TransferTaskInfo) GetTargetRunID() (o []byte) { - if v != nil && v.TargetRunID != nil { - return v.TargetRunID - } - - return -} - -// IsSetTargetRunID returns true if TargetRunID is not nil. -func (v *TransferTaskInfo) IsSetTargetRunID() bool { - return v != nil && v.TargetRunID != nil -} - -// GetTaskList returns the value of TaskList if it is set or its -// zero value if it is unset. -func (v *TransferTaskInfo) GetTaskList() (o string) { - if v != nil && v.TaskList != nil { - return *v.TaskList - } - - return -} - -// IsSetTaskList returns true if TaskList is not nil. -func (v *TransferTaskInfo) IsSetTaskList() bool { - return v != nil && v.TaskList != nil -} - -// GetTargetChildWorkflowOnly returns the value of TargetChildWorkflowOnly if it is set or its -// zero value if it is unset. -func (v *TransferTaskInfo) GetTargetChildWorkflowOnly() (o bool) { - if v != nil && v.TargetChildWorkflowOnly != nil { - return *v.TargetChildWorkflowOnly - } - - return -} - -// IsSetTargetChildWorkflowOnly returns true if TargetChildWorkflowOnly is not nil. -func (v *TransferTaskInfo) IsSetTargetChildWorkflowOnly() bool { - return v != nil && v.TargetChildWorkflowOnly != nil -} - -// GetScheduleID returns the value of ScheduleID if it is set or its -// zero value if it is unset. -func (v *TransferTaskInfo) GetScheduleID() (o int64) { - if v != nil && v.ScheduleID != nil { - return *v.ScheduleID - } - - return -} - -// IsSetScheduleID returns true if ScheduleID is not nil. -func (v *TransferTaskInfo) IsSetScheduleID() bool { - return v != nil && v.ScheduleID != nil -} - -// GetVersion returns the value of Version if it is set or its -// zero value if it is unset. -func (v *TransferTaskInfo) GetVersion() (o int64) { +func (v *TimerTaskInfo) GetVersion() (o int64) { if v != nil && v.Version != nil { return *v.Version } @@ -14292,148 +13538,86 @@ func (v *TransferTaskInfo) GetVersion() (o int64) { } // IsSetVersion returns true if Version is not nil. 
-func (v *TransferTaskInfo) IsSetVersion() bool { +func (v *TimerTaskInfo) IsSetVersion() bool { return v != nil && v.Version != nil } -// GetVisibilityTimestampNanos returns the value of VisibilityTimestampNanos if it is set or its +// GetScheduleAttempt returns the value of ScheduleAttempt if it is set or its // zero value if it is unset. -func (v *TransferTaskInfo) GetVisibilityTimestampNanos() (o int64) { - if v != nil && v.VisibilityTimestampNanos != nil { - return *v.VisibilityTimestampNanos +func (v *TimerTaskInfo) GetScheduleAttempt() (o int64) { + if v != nil && v.ScheduleAttempt != nil { + return *v.ScheduleAttempt } return } -// IsSetVisibilityTimestampNanos returns true if VisibilityTimestampNanos is not nil. -func (v *TransferTaskInfo) IsSetVisibilityTimestampNanos() bool { - return v != nil && v.VisibilityTimestampNanos != nil +// IsSetScheduleAttempt returns true if ScheduleAttempt is not nil. +func (v *TimerTaskInfo) IsSetScheduleAttempt() bool { + return v != nil && v.ScheduleAttempt != nil } -// GetTargetDomainIDs returns the value of TargetDomainIDs if it is set or its +// GetEventID returns the value of EventID if it is set or its // zero value if it is unset. -func (v *TransferTaskInfo) GetTargetDomainIDs() (o [][]byte) { - if v != nil && v.TargetDomainIDs != nil { - return v.TargetDomainIDs +func (v *TimerTaskInfo) GetEventID() (o int64) { + if v != nil && v.EventID != nil { + return *v.EventID } return } -// IsSetTargetDomainIDs returns true if TargetDomainIDs is not nil. -func (v *TransferTaskInfo) IsSetTargetDomainIDs() bool { - return v != nil && v.TargetDomainIDs != nil +// IsSetEventID returns true if EventID is not nil. +func (v *TimerTaskInfo) IsSetEventID() bool { + return v != nil && v.EventID != nil } -type WorkflowExecutionInfo struct { - ParentDomainID []byte `json:"parentDomainID,omitempty"` - ParentWorkflowID *string `json:"parentWorkflowID,omitempty"` - ParentRunID []byte `json:"parentRunID,omitempty"` - InitiatedID *int64 `json:"initiatedID,omitempty"` - CompletionEventBatchID *int64 `json:"completionEventBatchID,omitempty"` - CompletionEvent []byte `json:"completionEvent,omitempty"` - CompletionEventEncoding *string `json:"completionEventEncoding,omitempty"` - TaskList *string `json:"taskList,omitempty"` - TaskListKind *shared.TaskListKind `json:"taskListKind,omitempty"` - WorkflowTypeName *string `json:"workflowTypeName,omitempty"` - WorkflowTimeoutSeconds *int32 `json:"workflowTimeoutSeconds,omitempty"` - DecisionTaskTimeoutSeconds *int32 `json:"decisionTaskTimeoutSeconds,omitempty"` - ExecutionContext []byte `json:"executionContext,omitempty"` - State *int32 `json:"state,omitempty"` - CloseStatus *int32 `json:"closeStatus,omitempty"` - StartVersion *int64 `json:"startVersion,omitempty"` - LastWriteEventID *int64 `json:"lastWriteEventID,omitempty"` - LastEventTaskID *int64 `json:"lastEventTaskID,omitempty"` - LastFirstEventID *int64 `json:"lastFirstEventID,omitempty"` - LastProcessedEvent *int64 `json:"lastProcessedEvent,omitempty"` - StartTimeNanos *int64 `json:"startTimeNanos,omitempty"` - LastUpdatedTimeNanos *int64 `json:"lastUpdatedTimeNanos,omitempty"` - DecisionVersion *int64 `json:"decisionVersion,omitempty"` - DecisionScheduleID *int64 `json:"decisionScheduleID,omitempty"` - DecisionStartedID *int64 `json:"decisionStartedID,omitempty"` - DecisionTimeout *int32 `json:"decisionTimeout,omitempty"` - DecisionAttempt *int64 `json:"decisionAttempt,omitempty"` - DecisionStartedTimestampNanos *int64 `json:"decisionStartedTimestampNanos,omitempty"` 
- DecisionScheduledTimestampNanos *int64 `json:"decisionScheduledTimestampNanos,omitempty"` - CancelRequested *bool `json:"cancelRequested,omitempty"` - DecisionOriginalScheduledTimestampNanos *int64 `json:"decisionOriginalScheduledTimestampNanos,omitempty"` - CreateRequestID *string `json:"createRequestID,omitempty"` - DecisionRequestID *string `json:"decisionRequestID,omitempty"` - CancelRequestID *string `json:"cancelRequestID,omitempty"` - StickyTaskList *string `json:"stickyTaskList,omitempty"` - StickyScheduleToStartTimeout *int64 `json:"stickyScheduleToStartTimeout,omitempty"` - RetryAttempt *int64 `json:"retryAttempt,omitempty"` - RetryInitialIntervalSeconds *int32 `json:"retryInitialIntervalSeconds,omitempty"` - RetryMaximumIntervalSeconds *int32 `json:"retryMaximumIntervalSeconds,omitempty"` - RetryMaximumAttempts *int32 `json:"retryMaximumAttempts,omitempty"` - RetryExpirationSeconds *int32 `json:"retryExpirationSeconds,omitempty"` - RetryBackoffCoefficient *float64 `json:"retryBackoffCoefficient,omitempty"` - RetryExpirationTimeNanos *int64 `json:"retryExpirationTimeNanos,omitempty"` - RetryNonRetryableErrors []string `json:"retryNonRetryableErrors,omitempty"` - HasRetryPolicy *bool `json:"hasRetryPolicy,omitempty"` - CronSchedule *string `json:"cronSchedule,omitempty"` - EventStoreVersion *int32 `json:"eventStoreVersion,omitempty"` - EventBranchToken []byte `json:"eventBranchToken,omitempty"` - SignalCount *int64 `json:"signalCount,omitempty"` - HistorySize *int64 `json:"historySize,omitempty"` - ClientLibraryVersion *string `json:"clientLibraryVersion,omitempty"` - ClientFeatureVersion *string `json:"clientFeatureVersion,omitempty"` - ClientImpl *string `json:"clientImpl,omitempty"` - AutoResetPoints []byte `json:"autoResetPoints,omitempty"` - AutoResetPointsEncoding *string `json:"autoResetPointsEncoding,omitempty"` - SearchAttributes map[string][]byte `json:"searchAttributes,omitempty"` - Memo map[string][]byte `json:"memo,omitempty"` - VersionHistories []byte `json:"versionHistories,omitempty"` - VersionHistoriesEncoding *string `json:"versionHistoriesEncoding,omitempty"` - FirstExecutionRunID []byte `json:"firstExecutionRunID,omitempty"` - PartitionConfig map[string]string `json:"partitionConfig,omitempty"` - Checksum []byte `json:"checksum,omitempty"` - ChecksumEncoding *string `json:"checksumEncoding,omitempty"` - CronOverlapPolicy *shared.CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` - ActiveClusterSelectionPolicy []byte `json:"activeClusterSelectionPolicy,omitempty"` - ActiveClusterSelectionPolicyEncoding *string `json:"activeClusterSelectionPolicyEncoding,omitempty"` +type TransferTaskInfo struct { + DomainID []byte `json:"domainID,omitempty"` + WorkflowID *string `json:"workflowID,omitempty"` + RunID []byte `json:"runID,omitempty"` + TaskType *int16 `json:"taskType,omitempty"` + TargetDomainID []byte `json:"targetDomainID,omitempty"` + TargetWorkflowID *string `json:"targetWorkflowID,omitempty"` + TargetRunID []byte `json:"targetRunID,omitempty"` + TaskList *string `json:"taskList,omitempty"` + TargetChildWorkflowOnly *bool `json:"targetChildWorkflowOnly,omitempty"` + ScheduleID *int64 `json:"scheduleID,omitempty"` + Version *int64 `json:"version,omitempty"` + VisibilityTimestampNanos *int64 `json:"visibilityTimestampNanos,omitempty"` + TargetDomainIDs [][]byte `json:"targetDomainIDs,omitempty"` } -type _Map_String_Binary_MapItemList map[string][]byte +type _Set_Binary_sliceType_ValueList [][]byte -func (m _Map_String_Binary_MapItemList) ForEach(f 
func(wire.MapItem) error) error { - for k, v := range m { - if v == nil { - return fmt.Errorf("invalid map 'map[string][]byte', key [%v]: value is nil", k) +func (v _Set_Binary_sliceType_ValueList) ForEach(f func(wire.Value) error) error { + for _, x := range v { + if x == nil { + return fmt.Errorf("invalid set '[]byte': contains nil value") } - kw, err := wire.NewValueString(k), error(nil) + w, err := wire.NewValueBinary(x), error(nil) if err != nil { return err } - vw, err := wire.NewValueBinary(v), error(nil) - if err != nil { - return err - } - err = f(wire.MapItem{Key: kw, Value: vw}) - if err != nil { + if err := f(w); err != nil { return err } } return nil } -func (m _Map_String_Binary_MapItemList) Size() int { - return len(m) -} - -func (_Map_String_Binary_MapItemList) KeyType() wire.Type { - return wire.TBinary +func (v _Set_Binary_sliceType_ValueList) Size() int { + return len(v) } -func (_Map_String_Binary_MapItemList) ValueType() wire.Type { +func (_Set_Binary_sliceType_ValueList) ValueType() wire.Type { return wire.TBinary } -func (_Map_String_Binary_MapItemList) Close() {} +func (_Set_Binary_sliceType_ValueList) Close() {} -// ToWire translates a WorkflowExecutionInfo struct into a Thrift-level intermediate +// ToWire translates a TransferTaskInfo struct into a Thrift-level intermediate // representation. This intermediate representation may be serialized // into bytes using a ThriftRW protocol implementation. // @@ -14448,64 +13632,64 @@ func (_Map_String_Binary_MapItemList) Close() {} // if err := binaryProtocol.Encode(x, writer); err != nil { // return err // } -func (v *WorkflowExecutionInfo) ToWire() (wire.Value, error) { +func (v *TransferTaskInfo) ToWire() (wire.Value, error) { var ( - fields [66]wire.Field + fields [13]wire.Field i int = 0 w wire.Value err error ) - if v.ParentDomainID != nil { - w, err = wire.NewValueBinary(v.ParentDomainID), error(nil) + if v.DomainID != nil { + w, err = wire.NewValueBinary(v.DomainID), error(nil) if err != nil { return w, err } fields[i] = wire.Field{ID: 10, Value: w} i++ } - if v.ParentWorkflowID != nil { - w, err = wire.NewValueString(*(v.ParentWorkflowID)), error(nil) + if v.WorkflowID != nil { + w, err = wire.NewValueString(*(v.WorkflowID)), error(nil) if err != nil { return w, err } fields[i] = wire.Field{ID: 12, Value: w} i++ } - if v.ParentRunID != nil { - w, err = wire.NewValueBinary(v.ParentRunID), error(nil) + if v.RunID != nil { + w, err = wire.NewValueBinary(v.RunID), error(nil) if err != nil { return w, err } fields[i] = wire.Field{ID: 14, Value: w} i++ } - if v.InitiatedID != nil { - w, err = wire.NewValueI64(*(v.InitiatedID)), error(nil) + if v.TaskType != nil { + w, err = wire.NewValueI16(*(v.TaskType)), error(nil) if err != nil { return w, err } fields[i] = wire.Field{ID: 16, Value: w} i++ } - if v.CompletionEventBatchID != nil { - w, err = wire.NewValueI64(*(v.CompletionEventBatchID)), error(nil) + if v.TargetDomainID != nil { + w, err = wire.NewValueBinary(v.TargetDomainID), error(nil) if err != nil { return w, err } fields[i] = wire.Field{ID: 18, Value: w} i++ } - if v.CompletionEvent != nil { - w, err = wire.NewValueBinary(v.CompletionEvent), error(nil) + if v.TargetWorkflowID != nil { + w, err = wire.NewValueString(*(v.TargetWorkflowID)), error(nil) if err != nil { return w, err } fields[i] = wire.Field{ID: 20, Value: w} i++ } - if v.CompletionEventEncoding != nil { - w, err = wire.NewValueString(*(v.CompletionEventEncoding)), error(nil) + if v.TargetRunID != nil { + w, err = 
wire.NewValueBinary(v.TargetRunID), error(nil) if err != nil { return w, err } @@ -14520,9 +13704,1163 @@ func (v *WorkflowExecutionInfo) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 24, Value: w} i++ } - if v.TaskListKind != nil { - w, err = v.TaskListKind.ToWire() - if err != nil { + if v.TargetChildWorkflowOnly != nil { + w, err = wire.NewValueBool(*(v.TargetChildWorkflowOnly)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 26, Value: w} + i++ + } + if v.ScheduleID != nil { + w, err = wire.NewValueI64(*(v.ScheduleID)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 28, Value: w} + i++ + } + if v.Version != nil { + w, err = wire.NewValueI64(*(v.Version)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 30, Value: w} + i++ + } + if v.VisibilityTimestampNanos != nil { + w, err = wire.NewValueI64(*(v.VisibilityTimestampNanos)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 32, Value: w} + i++ + } + if v.TargetDomainIDs != nil { + w, err = wire.NewValueSet(_Set_Binary_sliceType_ValueList(v.TargetDomainIDs)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 34, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _Set_Binary_sliceType_Read(s wire.ValueList) ([][]byte, error) { + if s.ValueType() != wire.TBinary { + return nil, nil + } + + o := make([][]byte, 0, s.Size()) + err := s.ForEach(func(x wire.Value) error { + i, err := x.GetBinary(), error(nil) + if err != nil { + return err + } + + o = append(o, i) + return nil + }) + s.Close() + return o, err +} + +// FromWire deserializes a TransferTaskInfo struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a TransferTaskInfo struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v TransferTaskInfo +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *TransferTaskInfo) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TBinary { + v.DomainID, err = field.Value.GetBinary(), error(nil) + if err != nil { + return err + } + + } + case 12: + if field.Value.Type() == wire.TBinary { + var x string + x, err = field.Value.GetString(), error(nil) + v.WorkflowID = &x + if err != nil { + return err + } + + } + case 14: + if field.Value.Type() == wire.TBinary { + v.RunID, err = field.Value.GetBinary(), error(nil) + if err != nil { + return err + } + + } + case 16: + if field.Value.Type() == wire.TI16 { + var x int16 + x, err = field.Value.GetI16(), error(nil) + v.TaskType = &x + if err != nil { + return err + } + + } + case 18: + if field.Value.Type() == wire.TBinary { + v.TargetDomainID, err = field.Value.GetBinary(), error(nil) + if err != nil { + return err + } + + } + case 20: + if field.Value.Type() == wire.TBinary { + var x string + x, err = field.Value.GetString(), error(nil) + v.TargetWorkflowID = &x + if err != nil { + return err + } + + } + case 22: + if field.Value.Type() == wire.TBinary { + v.TargetRunID, err = field.Value.GetBinary(), error(nil) + if err != nil { + return err + } + + } + case 24: + if field.Value.Type() == wire.TBinary { + var x string + x, err = field.Value.GetString(), error(nil) + v.TaskList = &x + if err != nil { + return err + } + + } + case 26: + if field.Value.Type() == wire.TBool { + var x bool + x, err = field.Value.GetBool(), error(nil) + v.TargetChildWorkflowOnly = &x + if err != nil { + return err + } + + } + case 28: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.ScheduleID = &x + if err != nil { + return err + } + + } + case 30: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.Version = &x + if err != nil { + return err + } + + } + case 32: + if field.Value.Type() == wire.TI64 { + var x int64 + x, err = field.Value.GetI64(), error(nil) + v.VisibilityTimestampNanos = &x + if err != nil { + return err + } + + } + case 34: + if field.Value.Type() == wire.TSet { + v.TargetDomainIDs, err = _Set_Binary_sliceType_Read(field.Value.GetSet()) + if err != nil { + return err + } + + } + } + } + + return nil +} + +func _Set_Binary_sliceType_Encode(val [][]byte, sw stream.Writer) error { + + sh := stream.SetHeader{ + Type: wire.TBinary, + Length: len(val), + } + + if err := sw.WriteSetBegin(sh); err != nil { + return err + } + + for _, v := range val { + if v == nil { + return fmt.Errorf("invalid set '[]byte': contains nil value") + } + + if err := sw.WriteBinary(v); err != nil { + return err + } + } + return sw.WriteSetEnd() +} + +// Encode serializes a TransferTaskInfo struct directly into bytes, without going +// through an intermediary type. +// +// An error is returned if a TransferTaskInfo struct could not be encoded. 
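For reference, a minimal round-trip sketch through the binary protocol, following the usage outlined in the generated doc comments. This snippet is not part of the generated file; it assumes go.uber.org/thriftrw/protocol, the package path declared in the ThriftModule below, and hypothetical field values.

package main

import (
	"bytes"
	"fmt"

	"github.com/uber/cadence/.gen/go/sqlblobs"
	"go.uber.org/thriftrw/protocol"
	"go.uber.org/thriftrw/wire"
)

func main() {
	taskType, scheduleID := int16(2), int64(42) // hypothetical values for illustration
	info := &sqlblobs.TransferTaskInfo{
		DomainID:   []byte("domain-uuid"),
		TaskType:   &taskType,
		ScheduleID: &scheduleID,
	}

	// Struct -> wire.Value -> bytes.
	x, err := info.ToWire()
	if err != nil {
		panic(err)
	}
	var buf bytes.Buffer
	if err := protocol.Binary.Encode(x, &buf); err != nil {
		panic(err)
	}

	// Bytes -> wire.Value -> struct.
	y, err := protocol.Binary.Decode(bytes.NewReader(buf.Bytes()), wire.TStruct)
	if err != nil {
		panic(err)
	}
	var decoded sqlblobs.TransferTaskInfo
	if err := decoded.FromWire(y); err != nil {
		panic(err)
	}
	fmt.Println(info.Equals(&decoded)) // prints: true
}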
+func (v *TransferTaskInfo) Encode(sw stream.Writer) error { + if err := sw.WriteStructBegin(); err != nil { + return err + } + + if v.DomainID != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 10, Type: wire.TBinary}); err != nil { + return err + } + if err := sw.WriteBinary(v.DomainID); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.WorkflowID != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 12, Type: wire.TBinary}); err != nil { + return err + } + if err := sw.WriteString(*(v.WorkflowID)); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.RunID != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 14, Type: wire.TBinary}); err != nil { + return err + } + if err := sw.WriteBinary(v.RunID); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.TaskType != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 16, Type: wire.TI16}); err != nil { + return err + } + if err := sw.WriteInt16(*(v.TaskType)); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.TargetDomainID != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 18, Type: wire.TBinary}); err != nil { + return err + } + if err := sw.WriteBinary(v.TargetDomainID); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.TargetWorkflowID != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 20, Type: wire.TBinary}); err != nil { + return err + } + if err := sw.WriteString(*(v.TargetWorkflowID)); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.TargetRunID != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 22, Type: wire.TBinary}); err != nil { + return err + } + if err := sw.WriteBinary(v.TargetRunID); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.TaskList != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 24, Type: wire.TBinary}); err != nil { + return err + } + if err := sw.WriteString(*(v.TaskList)); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.TargetChildWorkflowOnly != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 26, Type: wire.TBool}); err != nil { + return err + } + if err := sw.WriteBool(*(v.TargetChildWorkflowOnly)); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.ScheduleID != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 28, Type: wire.TI64}); err != nil { + return err + } + if err := sw.WriteInt64(*(v.ScheduleID)); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.Version != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 30, Type: wire.TI64}); err != nil { + return err + } + if err := sw.WriteInt64(*(v.Version)); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.VisibilityTimestampNanos != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 32, Type: wire.TI64}); err != nil { + return err + } + if err := sw.WriteInt64(*(v.VisibilityTimestampNanos)); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + if v.TargetDomainIDs != nil { + if err := 
sw.WriteFieldBegin(stream.FieldHeader{ID: 34, Type: wire.TSet}); err != nil { + return err + } + if err := _Set_Binary_sliceType_Encode(v.TargetDomainIDs, sw); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } + } + + return sw.WriteStructEnd() +} + +func _Set_Binary_sliceType_Decode(sr stream.Reader) ([][]byte, error) { + sh, err := sr.ReadSetBegin() + if err != nil { + return nil, err + } + + if sh.Type != wire.TBinary { + for i := 0; i < sh.Length; i++ { + if err := sr.Skip(sh.Type); err != nil { + return nil, err + } + } + return nil, sr.ReadSetEnd() + } + + o := make([][]byte, 0, sh.Length) + for i := 0; i < sh.Length; i++ { + v, err := sr.ReadBinary() + if err != nil { + return nil, err + } + + o = append(o, v) + } + + if err = sr.ReadSetEnd(); err != nil { + return nil, err + } + return o, err +} + +// Decode deserializes a TransferTaskInfo struct directly from its Thrift-level +// representation, without going through an intemediary type. +// +// An error is returned if a TransferTaskInfo struct could not be generated from the wire +// representation. +func (v *TransferTaskInfo) Decode(sr stream.Reader) error { + + if err := sr.ReadStructBegin(); err != nil { + return err + } + + fh, ok, err := sr.ReadFieldBegin() + if err != nil { + return err + } + + for ok { + switch { + case fh.ID == 10 && fh.Type == wire.TBinary: + v.DomainID, err = sr.ReadBinary() + if err != nil { + return err + } + + case fh.ID == 12 && fh.Type == wire.TBinary: + var x string + x, err = sr.ReadString() + v.WorkflowID = &x + if err != nil { + return err + } + + case fh.ID == 14 && fh.Type == wire.TBinary: + v.RunID, err = sr.ReadBinary() + if err != nil { + return err + } + + case fh.ID == 16 && fh.Type == wire.TI16: + var x int16 + x, err = sr.ReadInt16() + v.TaskType = &x + if err != nil { + return err + } + + case fh.ID == 18 && fh.Type == wire.TBinary: + v.TargetDomainID, err = sr.ReadBinary() + if err != nil { + return err + } + + case fh.ID == 20 && fh.Type == wire.TBinary: + var x string + x, err = sr.ReadString() + v.TargetWorkflowID = &x + if err != nil { + return err + } + + case fh.ID == 22 && fh.Type == wire.TBinary: + v.TargetRunID, err = sr.ReadBinary() + if err != nil { + return err + } + + case fh.ID == 24 && fh.Type == wire.TBinary: + var x string + x, err = sr.ReadString() + v.TaskList = &x + if err != nil { + return err + } + + case fh.ID == 26 && fh.Type == wire.TBool: + var x bool + x, err = sr.ReadBool() + v.TargetChildWorkflowOnly = &x + if err != nil { + return err + } + + case fh.ID == 28 && fh.Type == wire.TI64: + var x int64 + x, err = sr.ReadInt64() + v.ScheduleID = &x + if err != nil { + return err + } + + case fh.ID == 30 && fh.Type == wire.TI64: + var x int64 + x, err = sr.ReadInt64() + v.Version = &x + if err != nil { + return err + } + + case fh.ID == 32 && fh.Type == wire.TI64: + var x int64 + x, err = sr.ReadInt64() + v.VisibilityTimestampNanos = &x + if err != nil { + return err + } + + case fh.ID == 34 && fh.Type == wire.TSet: + v.TargetDomainIDs, err = _Set_Binary_sliceType_Decode(sr) + if err != nil { + return err + } + + default: + if err := sr.Skip(fh.Type); err != nil { + return err + } + } + + if err := sr.ReadFieldEnd(); err != nil { + return err + } + + if fh, ok, err = sr.ReadFieldBegin(); err != nil { + return err + } + } + + if err := sr.ReadStructEnd(); err != nil { + return err + } + + return nil +} + +// String returns a readable string representation of a TransferTaskInfo +// struct. 
+func (v *TransferTaskInfo) String() string { + if v == nil { + return "" + } + + var fields [13]string + i := 0 + if v.DomainID != nil { + fields[i] = fmt.Sprintf("DomainID: %v", v.DomainID) + i++ + } + if v.WorkflowID != nil { + fields[i] = fmt.Sprintf("WorkflowID: %v", *(v.WorkflowID)) + i++ + } + if v.RunID != nil { + fields[i] = fmt.Sprintf("RunID: %v", v.RunID) + i++ + } + if v.TaskType != nil { + fields[i] = fmt.Sprintf("TaskType: %v", *(v.TaskType)) + i++ + } + if v.TargetDomainID != nil { + fields[i] = fmt.Sprintf("TargetDomainID: %v", v.TargetDomainID) + i++ + } + if v.TargetWorkflowID != nil { + fields[i] = fmt.Sprintf("TargetWorkflowID: %v", *(v.TargetWorkflowID)) + i++ + } + if v.TargetRunID != nil { + fields[i] = fmt.Sprintf("TargetRunID: %v", v.TargetRunID) + i++ + } + if v.TaskList != nil { + fields[i] = fmt.Sprintf("TaskList: %v", *(v.TaskList)) + i++ + } + if v.TargetChildWorkflowOnly != nil { + fields[i] = fmt.Sprintf("TargetChildWorkflowOnly: %v", *(v.TargetChildWorkflowOnly)) + i++ + } + if v.ScheduleID != nil { + fields[i] = fmt.Sprintf("ScheduleID: %v", *(v.ScheduleID)) + i++ + } + if v.Version != nil { + fields[i] = fmt.Sprintf("Version: %v", *(v.Version)) + i++ + } + if v.VisibilityTimestampNanos != nil { + fields[i] = fmt.Sprintf("VisibilityTimestampNanos: %v", *(v.VisibilityTimestampNanos)) + i++ + } + if v.TargetDomainIDs != nil { + fields[i] = fmt.Sprintf("TargetDomainIDs: %v", v.TargetDomainIDs) + i++ + } + + return fmt.Sprintf("TransferTaskInfo{%v}", strings.Join(fields[:i], ", ")) +} + +func _Set_Binary_sliceType_Equals(lhs, rhs [][]byte) bool { + if len(lhs) != len(rhs) { + return false + } + + for _, x := range lhs { + ok := false + for _, y := range rhs { + if bytes.Equal(x, y) { + ok = true + break + } + } + if !ok { + return false + } + } + + return true +} + +// Equals returns true if all the fields of this TransferTaskInfo match the +// provided TransferTaskInfo. +// +// This function performs a deep comparison. 
+func (v *TransferTaskInfo) Equals(rhs *TransferTaskInfo) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.DomainID == nil && rhs.DomainID == nil) || (v.DomainID != nil && rhs.DomainID != nil && bytes.Equal(v.DomainID, rhs.DomainID))) { + return false + } + if !_String_EqualsPtr(v.WorkflowID, rhs.WorkflowID) { + return false + } + if !((v.RunID == nil && rhs.RunID == nil) || (v.RunID != nil && rhs.RunID != nil && bytes.Equal(v.RunID, rhs.RunID))) { + return false + } + if !_I16_EqualsPtr(v.TaskType, rhs.TaskType) { + return false + } + if !((v.TargetDomainID == nil && rhs.TargetDomainID == nil) || (v.TargetDomainID != nil && rhs.TargetDomainID != nil && bytes.Equal(v.TargetDomainID, rhs.TargetDomainID))) { + return false + } + if !_String_EqualsPtr(v.TargetWorkflowID, rhs.TargetWorkflowID) { + return false + } + if !((v.TargetRunID == nil && rhs.TargetRunID == nil) || (v.TargetRunID != nil && rhs.TargetRunID != nil && bytes.Equal(v.TargetRunID, rhs.TargetRunID))) { + return false + } + if !_String_EqualsPtr(v.TaskList, rhs.TaskList) { + return false + } + if !_Bool_EqualsPtr(v.TargetChildWorkflowOnly, rhs.TargetChildWorkflowOnly) { + return false + } + if !_I64_EqualsPtr(v.ScheduleID, rhs.ScheduleID) { + return false + } + if !_I64_EqualsPtr(v.Version, rhs.Version) { + return false + } + if !_I64_EqualsPtr(v.VisibilityTimestampNanos, rhs.VisibilityTimestampNanos) { + return false + } + if !((v.TargetDomainIDs == nil && rhs.TargetDomainIDs == nil) || (v.TargetDomainIDs != nil && rhs.TargetDomainIDs != nil && _Set_Binary_sliceType_Equals(v.TargetDomainIDs, rhs.TargetDomainIDs))) { + return false + } + + return true +} + +type _Set_Binary_sliceType_Zapper [][]byte + +// MarshalLogArray implements zapcore.ArrayMarshaler, enabling +// fast logging of _Set_Binary_sliceType_Zapper. +func (s _Set_Binary_sliceType_Zapper) MarshalLogArray(enc zapcore.ArrayEncoder) (err error) { + for _, v := range s { + enc.AppendString(base64.StdEncoding.EncodeToString(v)) + } + return err +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of TransferTaskInfo. 
+func (v *TransferTaskInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.DomainID != nil { + enc.AddString("domainID", base64.StdEncoding.EncodeToString(v.DomainID)) + } + if v.WorkflowID != nil { + enc.AddString("workflowID", *v.WorkflowID) + } + if v.RunID != nil { + enc.AddString("runID", base64.StdEncoding.EncodeToString(v.RunID)) + } + if v.TaskType != nil { + enc.AddInt16("taskType", *v.TaskType) + } + if v.TargetDomainID != nil { + enc.AddString("targetDomainID", base64.StdEncoding.EncodeToString(v.TargetDomainID)) + } + if v.TargetWorkflowID != nil { + enc.AddString("targetWorkflowID", *v.TargetWorkflowID) + } + if v.TargetRunID != nil { + enc.AddString("targetRunID", base64.StdEncoding.EncodeToString(v.TargetRunID)) + } + if v.TaskList != nil { + enc.AddString("taskList", *v.TaskList) + } + if v.TargetChildWorkflowOnly != nil { + enc.AddBool("targetChildWorkflowOnly", *v.TargetChildWorkflowOnly) + } + if v.ScheduleID != nil { + enc.AddInt64("scheduleID", *v.ScheduleID) + } + if v.Version != nil { + enc.AddInt64("version", *v.Version) + } + if v.VisibilityTimestampNanos != nil { + enc.AddInt64("visibilityTimestampNanos", *v.VisibilityTimestampNanos) + } + if v.TargetDomainIDs != nil { + err = multierr.Append(err, enc.AddArray("targetDomainIDs", (_Set_Binary_sliceType_Zapper)(v.TargetDomainIDs))) + } + return err +} + +// GetDomainID returns the value of DomainID if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetDomainID() (o []byte) { + if v != nil && v.DomainID != nil { + return v.DomainID + } + + return +} + +// IsSetDomainID returns true if DomainID is not nil. +func (v *TransferTaskInfo) IsSetDomainID() bool { + return v != nil && v.DomainID != nil +} + +// GetWorkflowID returns the value of WorkflowID if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetWorkflowID() (o string) { + if v != nil && v.WorkflowID != nil { + return *v.WorkflowID + } + + return +} + +// IsSetWorkflowID returns true if WorkflowID is not nil. +func (v *TransferTaskInfo) IsSetWorkflowID() bool { + return v != nil && v.WorkflowID != nil +} + +// GetRunID returns the value of RunID if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetRunID() (o []byte) { + if v != nil && v.RunID != nil { + return v.RunID + } + + return +} + +// IsSetRunID returns true if RunID is not nil. +func (v *TransferTaskInfo) IsSetRunID() bool { + return v != nil && v.RunID != nil +} + +// GetTaskType returns the value of TaskType if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetTaskType() (o int16) { + if v != nil && v.TaskType != nil { + return *v.TaskType + } + + return +} + +// IsSetTaskType returns true if TaskType is not nil. +func (v *TransferTaskInfo) IsSetTaskType() bool { + return v != nil && v.TaskType != nil +} + +// GetTargetDomainID returns the value of TargetDomainID if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetTargetDomainID() (o []byte) { + if v != nil && v.TargetDomainID != nil { + return v.TargetDomainID + } + + return +} + +// IsSetTargetDomainID returns true if TargetDomainID is not nil. +func (v *TransferTaskInfo) IsSetTargetDomainID() bool { + return v != nil && v.TargetDomainID != nil +} + +// GetTargetWorkflowID returns the value of TargetWorkflowID if it is set or its +// zero value if it is unset. 
+func (v *TransferTaskInfo) GetTargetWorkflowID() (o string) { + if v != nil && v.TargetWorkflowID != nil { + return *v.TargetWorkflowID + } + + return +} + +// IsSetTargetWorkflowID returns true if TargetWorkflowID is not nil. +func (v *TransferTaskInfo) IsSetTargetWorkflowID() bool { + return v != nil && v.TargetWorkflowID != nil +} + +// GetTargetRunID returns the value of TargetRunID if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetTargetRunID() (o []byte) { + if v != nil && v.TargetRunID != nil { + return v.TargetRunID + } + + return +} + +// IsSetTargetRunID returns true if TargetRunID is not nil. +func (v *TransferTaskInfo) IsSetTargetRunID() bool { + return v != nil && v.TargetRunID != nil +} + +// GetTaskList returns the value of TaskList if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetTaskList() (o string) { + if v != nil && v.TaskList != nil { + return *v.TaskList + } + + return +} + +// IsSetTaskList returns true if TaskList is not nil. +func (v *TransferTaskInfo) IsSetTaskList() bool { + return v != nil && v.TaskList != nil +} + +// GetTargetChildWorkflowOnly returns the value of TargetChildWorkflowOnly if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetTargetChildWorkflowOnly() (o bool) { + if v != nil && v.TargetChildWorkflowOnly != nil { + return *v.TargetChildWorkflowOnly + } + + return +} + +// IsSetTargetChildWorkflowOnly returns true if TargetChildWorkflowOnly is not nil. +func (v *TransferTaskInfo) IsSetTargetChildWorkflowOnly() bool { + return v != nil && v.TargetChildWorkflowOnly != nil +} + +// GetScheduleID returns the value of ScheduleID if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetScheduleID() (o int64) { + if v != nil && v.ScheduleID != nil { + return *v.ScheduleID + } + + return +} + +// IsSetScheduleID returns true if ScheduleID is not nil. +func (v *TransferTaskInfo) IsSetScheduleID() bool { + return v != nil && v.ScheduleID != nil +} + +// GetVersion returns the value of Version if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetVersion() (o int64) { + if v != nil && v.Version != nil { + return *v.Version + } + + return +} + +// IsSetVersion returns true if Version is not nil. +func (v *TransferTaskInfo) IsSetVersion() bool { + return v != nil && v.Version != nil +} + +// GetVisibilityTimestampNanos returns the value of VisibilityTimestampNanos if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetVisibilityTimestampNanos() (o int64) { + if v != nil && v.VisibilityTimestampNanos != nil { + return *v.VisibilityTimestampNanos + } + + return +} + +// IsSetVisibilityTimestampNanos returns true if VisibilityTimestampNanos is not nil. +func (v *TransferTaskInfo) IsSetVisibilityTimestampNanos() bool { + return v != nil && v.VisibilityTimestampNanos != nil +} + +// GetTargetDomainIDs returns the value of TargetDomainIDs if it is set or its +// zero value if it is unset. +func (v *TransferTaskInfo) GetTargetDomainIDs() (o [][]byte) { + if v != nil && v.TargetDomainIDs != nil { + return v.TargetDomainIDs + } + + return +} + +// IsSetTargetDomainIDs returns true if TargetDomainIDs is not nil. 
+func (v *TransferTaskInfo) IsSetTargetDomainIDs() bool { + return v != nil && v.TargetDomainIDs != nil +} + +type WorkflowExecutionInfo struct { + ParentDomainID []byte `json:"parentDomainID,omitempty"` + ParentWorkflowID *string `json:"parentWorkflowID,omitempty"` + ParentRunID []byte `json:"parentRunID,omitempty"` + InitiatedID *int64 `json:"initiatedID,omitempty"` + CompletionEventBatchID *int64 `json:"completionEventBatchID,omitempty"` + CompletionEvent []byte `json:"completionEvent,omitempty"` + CompletionEventEncoding *string `json:"completionEventEncoding,omitempty"` + TaskList *string `json:"taskList,omitempty"` + TaskListKind *shared.TaskListKind `json:"taskListKind,omitempty"` + WorkflowTypeName *string `json:"workflowTypeName,omitempty"` + WorkflowTimeoutSeconds *int32 `json:"workflowTimeoutSeconds,omitempty"` + DecisionTaskTimeoutSeconds *int32 `json:"decisionTaskTimeoutSeconds,omitempty"` + ExecutionContext []byte `json:"executionContext,omitempty"` + State *int32 `json:"state,omitempty"` + CloseStatus *int32 `json:"closeStatus,omitempty"` + StartVersion *int64 `json:"startVersion,omitempty"` + LastWriteEventID *int64 `json:"lastWriteEventID,omitempty"` + LastEventTaskID *int64 `json:"lastEventTaskID,omitempty"` + LastFirstEventID *int64 `json:"lastFirstEventID,omitempty"` + LastProcessedEvent *int64 `json:"lastProcessedEvent,omitempty"` + StartTimeNanos *int64 `json:"startTimeNanos,omitempty"` + LastUpdatedTimeNanos *int64 `json:"lastUpdatedTimeNanos,omitempty"` + DecisionVersion *int64 `json:"decisionVersion,omitempty"` + DecisionScheduleID *int64 `json:"decisionScheduleID,omitempty"` + DecisionStartedID *int64 `json:"decisionStartedID,omitempty"` + DecisionTimeout *int32 `json:"decisionTimeout,omitempty"` + DecisionAttempt *int64 `json:"decisionAttempt,omitempty"` + DecisionStartedTimestampNanos *int64 `json:"decisionStartedTimestampNanos,omitempty"` + DecisionScheduledTimestampNanos *int64 `json:"decisionScheduledTimestampNanos,omitempty"` + CancelRequested *bool `json:"cancelRequested,omitempty"` + DecisionOriginalScheduledTimestampNanos *int64 `json:"decisionOriginalScheduledTimestampNanos,omitempty"` + CreateRequestID *string `json:"createRequestID,omitempty"` + DecisionRequestID *string `json:"decisionRequestID,omitempty"` + CancelRequestID *string `json:"cancelRequestID,omitempty"` + StickyTaskList *string `json:"stickyTaskList,omitempty"` + StickyScheduleToStartTimeout *int64 `json:"stickyScheduleToStartTimeout,omitempty"` + RetryAttempt *int64 `json:"retryAttempt,omitempty"` + RetryInitialIntervalSeconds *int32 `json:"retryInitialIntervalSeconds,omitempty"` + RetryMaximumIntervalSeconds *int32 `json:"retryMaximumIntervalSeconds,omitempty"` + RetryMaximumAttempts *int32 `json:"retryMaximumAttempts,omitempty"` + RetryExpirationSeconds *int32 `json:"retryExpirationSeconds,omitempty"` + RetryBackoffCoefficient *float64 `json:"retryBackoffCoefficient,omitempty"` + RetryExpirationTimeNanos *int64 `json:"retryExpirationTimeNanos,omitempty"` + RetryNonRetryableErrors []string `json:"retryNonRetryableErrors,omitempty"` + HasRetryPolicy *bool `json:"hasRetryPolicy,omitempty"` + CronSchedule *string `json:"cronSchedule,omitempty"` + EventStoreVersion *int32 `json:"eventStoreVersion,omitempty"` + EventBranchToken []byte `json:"eventBranchToken,omitempty"` + SignalCount *int64 `json:"signalCount,omitempty"` + HistorySize *int64 `json:"historySize,omitempty"` + ClientLibraryVersion *string `json:"clientLibraryVersion,omitempty"` + ClientFeatureVersion *string 
`json:"clientFeatureVersion,omitempty"` + ClientImpl *string `json:"clientImpl,omitempty"` + AutoResetPoints []byte `json:"autoResetPoints,omitempty"` + AutoResetPointsEncoding *string `json:"autoResetPointsEncoding,omitempty"` + SearchAttributes map[string][]byte `json:"searchAttributes,omitempty"` + Memo map[string][]byte `json:"memo,omitempty"` + VersionHistories []byte `json:"versionHistories,omitempty"` + VersionHistoriesEncoding *string `json:"versionHistoriesEncoding,omitempty"` + FirstExecutionRunID []byte `json:"firstExecutionRunID,omitempty"` + PartitionConfig map[string]string `json:"partitionConfig,omitempty"` + Checksum []byte `json:"checksum,omitempty"` + ChecksumEncoding *string `json:"checksumEncoding,omitempty"` + CronOverlapPolicy *shared.CronOverlapPolicy `json:"cronOverlapPolicy,omitempty"` + ActiveClusterSelectionPolicy []byte `json:"activeClusterSelectionPolicy,omitempty"` + ActiveClusterSelectionPolicyEncoding *string `json:"activeClusterSelectionPolicyEncoding,omitempty"` +} + +type _Map_String_Binary_MapItemList map[string][]byte + +func (m _Map_String_Binary_MapItemList) ForEach(f func(wire.MapItem) error) error { + for k, v := range m { + if v == nil { + return fmt.Errorf("invalid map 'map[string][]byte', key [%v]: value is nil", k) + } + kw, err := wire.NewValueString(k), error(nil) + if err != nil { + return err + } + + vw, err := wire.NewValueBinary(v), error(nil) + if err != nil { + return err + } + err = f(wire.MapItem{Key: kw, Value: vw}) + if err != nil { + return err + } + } + return nil +} + +func (m _Map_String_Binary_MapItemList) Size() int { + return len(m) +} + +func (_Map_String_Binary_MapItemList) KeyType() wire.Type { + return wire.TBinary +} + +func (_Map_String_Binary_MapItemList) ValueType() wire.Type { + return wire.TBinary +} + +func (_Map_String_Binary_MapItemList) Close() {} + +// ToWire translates a WorkflowExecutionInfo struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *WorkflowExecutionInfo) ToWire() (wire.Value, error) { + var ( + fields [66]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.ParentDomainID != nil { + w, err = wire.NewValueBinary(v.ParentDomainID), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ + } + if v.ParentWorkflowID != nil { + w, err = wire.NewValueString(*(v.ParentWorkflowID)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 12, Value: w} + i++ + } + if v.ParentRunID != nil { + w, err = wire.NewValueBinary(v.ParentRunID), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 14, Value: w} + i++ + } + if v.InitiatedID != nil { + w, err = wire.NewValueI64(*(v.InitiatedID)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 16, Value: w} + i++ + } + if v.CompletionEventBatchID != nil { + w, err = wire.NewValueI64(*(v.CompletionEventBatchID)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 18, Value: w} + i++ + } + if v.CompletionEvent != nil { + w, err = wire.NewValueBinary(v.CompletionEvent), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 20, Value: w} + i++ + } + if v.CompletionEventEncoding != nil { + w, err = wire.NewValueString(*(v.CompletionEventEncoding)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 22, Value: w} + i++ + } + if v.TaskList != nil { + w, err = wire.NewValueString(*(v.TaskList)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 24, Value: w} + i++ + } + if v.TaskListKind != nil { + w, err = v.TaskListKind.ToWire() + if err != nil { return w, err } fields[i] = wire.Field{ID: 25, Value: w} @@ -18419,407 +18757,749 @@ func (v *WorkflowExecutionInfo) IsSetRetryMaximumIntervalSeconds() bool { // GetRetryMaximumAttempts returns the value of RetryMaximumAttempts if it is set or its // zero value if it is unset. -func (v *WorkflowExecutionInfo) GetRetryMaximumAttempts() (o int32) { - if v != nil && v.RetryMaximumAttempts != nil { - return *v.RetryMaximumAttempts +func (v *WorkflowExecutionInfo) GetRetryMaximumAttempts() (o int32) { + if v != nil && v.RetryMaximumAttempts != nil { + return *v.RetryMaximumAttempts + } + + return +} + +// IsSetRetryMaximumAttempts returns true if RetryMaximumAttempts is not nil. +func (v *WorkflowExecutionInfo) IsSetRetryMaximumAttempts() bool { + return v != nil && v.RetryMaximumAttempts != nil +} + +// GetRetryExpirationSeconds returns the value of RetryExpirationSeconds if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetRetryExpirationSeconds() (o int32) { + if v != nil && v.RetryExpirationSeconds != nil { + return *v.RetryExpirationSeconds + } + + return +} + +// IsSetRetryExpirationSeconds returns true if RetryExpirationSeconds is not nil. +func (v *WorkflowExecutionInfo) IsSetRetryExpirationSeconds() bool { + return v != nil && v.RetryExpirationSeconds != nil +} + +// GetRetryBackoffCoefficient returns the value of RetryBackoffCoefficient if it is set or its +// zero value if it is unset. 
+func (v *WorkflowExecutionInfo) GetRetryBackoffCoefficient() (o float64) { + if v != nil && v.RetryBackoffCoefficient != nil { + return *v.RetryBackoffCoefficient + } + + return +} + +// IsSetRetryBackoffCoefficient returns true if RetryBackoffCoefficient is not nil. +func (v *WorkflowExecutionInfo) IsSetRetryBackoffCoefficient() bool { + return v != nil && v.RetryBackoffCoefficient != nil +} + +// GetRetryExpirationTimeNanos returns the value of RetryExpirationTimeNanos if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetRetryExpirationTimeNanos() (o int64) { + if v != nil && v.RetryExpirationTimeNanos != nil { + return *v.RetryExpirationTimeNanos + } + + return +} + +// IsSetRetryExpirationTimeNanos returns true if RetryExpirationTimeNanos is not nil. +func (v *WorkflowExecutionInfo) IsSetRetryExpirationTimeNanos() bool { + return v != nil && v.RetryExpirationTimeNanos != nil +} + +// GetRetryNonRetryableErrors returns the value of RetryNonRetryableErrors if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetRetryNonRetryableErrors() (o []string) { + if v != nil && v.RetryNonRetryableErrors != nil { + return v.RetryNonRetryableErrors + } + + return +} + +// IsSetRetryNonRetryableErrors returns true if RetryNonRetryableErrors is not nil. +func (v *WorkflowExecutionInfo) IsSetRetryNonRetryableErrors() bool { + return v != nil && v.RetryNonRetryableErrors != nil +} + +// GetHasRetryPolicy returns the value of HasRetryPolicy if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetHasRetryPolicy() (o bool) { + if v != nil && v.HasRetryPolicy != nil { + return *v.HasRetryPolicy + } + + return +} + +// IsSetHasRetryPolicy returns true if HasRetryPolicy is not nil. +func (v *WorkflowExecutionInfo) IsSetHasRetryPolicy() bool { + return v != nil && v.HasRetryPolicy != nil +} + +// GetCronSchedule returns the value of CronSchedule if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetCronSchedule() (o string) { + if v != nil && v.CronSchedule != nil { + return *v.CronSchedule + } + + return +} + +// IsSetCronSchedule returns true if CronSchedule is not nil. +func (v *WorkflowExecutionInfo) IsSetCronSchedule() bool { + return v != nil && v.CronSchedule != nil +} + +// GetEventStoreVersion returns the value of EventStoreVersion if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetEventStoreVersion() (o int32) { + if v != nil && v.EventStoreVersion != nil { + return *v.EventStoreVersion + } + + return +} + +// IsSetEventStoreVersion returns true if EventStoreVersion is not nil. +func (v *WorkflowExecutionInfo) IsSetEventStoreVersion() bool { + return v != nil && v.EventStoreVersion != nil +} + +// GetEventBranchToken returns the value of EventBranchToken if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetEventBranchToken() (o []byte) { + if v != nil && v.EventBranchToken != nil { + return v.EventBranchToken + } + + return +} + +// IsSetEventBranchToken returns true if EventBranchToken is not nil. +func (v *WorkflowExecutionInfo) IsSetEventBranchToken() bool { + return v != nil && v.EventBranchToken != nil +} + +// GetSignalCount returns the value of SignalCount if it is set or its +// zero value if it is unset. 
+func (v *WorkflowExecutionInfo) GetSignalCount() (o int64) { + if v != nil && v.SignalCount != nil { + return *v.SignalCount + } + + return +} + +// IsSetSignalCount returns true if SignalCount is not nil. +func (v *WorkflowExecutionInfo) IsSetSignalCount() bool { + return v != nil && v.SignalCount != nil +} + +// GetHistorySize returns the value of HistorySize if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetHistorySize() (o int64) { + if v != nil && v.HistorySize != nil { + return *v.HistorySize + } + + return +} + +// IsSetHistorySize returns true if HistorySize is not nil. +func (v *WorkflowExecutionInfo) IsSetHistorySize() bool { + return v != nil && v.HistorySize != nil +} + +// GetClientLibraryVersion returns the value of ClientLibraryVersion if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetClientLibraryVersion() (o string) { + if v != nil && v.ClientLibraryVersion != nil { + return *v.ClientLibraryVersion + } + + return +} + +// IsSetClientLibraryVersion returns true if ClientLibraryVersion is not nil. +func (v *WorkflowExecutionInfo) IsSetClientLibraryVersion() bool { + return v != nil && v.ClientLibraryVersion != nil +} + +// GetClientFeatureVersion returns the value of ClientFeatureVersion if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetClientFeatureVersion() (o string) { + if v != nil && v.ClientFeatureVersion != nil { + return *v.ClientFeatureVersion + } + + return +} + +// IsSetClientFeatureVersion returns true if ClientFeatureVersion is not nil. +func (v *WorkflowExecutionInfo) IsSetClientFeatureVersion() bool { + return v != nil && v.ClientFeatureVersion != nil +} + +// GetClientImpl returns the value of ClientImpl if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetClientImpl() (o string) { + if v != nil && v.ClientImpl != nil { + return *v.ClientImpl + } + + return +} + +// IsSetClientImpl returns true if ClientImpl is not nil. +func (v *WorkflowExecutionInfo) IsSetClientImpl() bool { + return v != nil && v.ClientImpl != nil +} + +// GetAutoResetPoints returns the value of AutoResetPoints if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetAutoResetPoints() (o []byte) { + if v != nil && v.AutoResetPoints != nil { + return v.AutoResetPoints + } + + return +} + +// IsSetAutoResetPoints returns true if AutoResetPoints is not nil. +func (v *WorkflowExecutionInfo) IsSetAutoResetPoints() bool { + return v != nil && v.AutoResetPoints != nil +} + +// GetAutoResetPointsEncoding returns the value of AutoResetPointsEncoding if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetAutoResetPointsEncoding() (o string) { + if v != nil && v.AutoResetPointsEncoding != nil { + return *v.AutoResetPointsEncoding + } + + return +} + +// IsSetAutoResetPointsEncoding returns true if AutoResetPointsEncoding is not nil. +func (v *WorkflowExecutionInfo) IsSetAutoResetPointsEncoding() bool { + return v != nil && v.AutoResetPointsEncoding != nil +} + +// GetSearchAttributes returns the value of SearchAttributes if it is set or its +// zero value if it is unset. +func (v *WorkflowExecutionInfo) GetSearchAttributes() (o map[string][]byte) { + if v != nil && v.SearchAttributes != nil { + return v.SearchAttributes } return } -// IsSetRetryMaximumAttempts returns true if RetryMaximumAttempts is not nil. 
-func (v *WorkflowExecutionInfo) IsSetRetryMaximumAttempts() bool { - return v != nil && v.RetryMaximumAttempts != nil +// IsSetSearchAttributes returns true if SearchAttributes is not nil. +func (v *WorkflowExecutionInfo) IsSetSearchAttributes() bool { + return v != nil && v.SearchAttributes != nil } -// GetRetryExpirationSeconds returns the value of RetryExpirationSeconds if it is set or its +// GetMemo returns the value of Memo if it is set or its // zero value if it is unset. -func (v *WorkflowExecutionInfo) GetRetryExpirationSeconds() (o int32) { - if v != nil && v.RetryExpirationSeconds != nil { - return *v.RetryExpirationSeconds +func (v *WorkflowExecutionInfo) GetMemo() (o map[string][]byte) { + if v != nil && v.Memo != nil { + return v.Memo } return } -// IsSetRetryExpirationSeconds returns true if RetryExpirationSeconds is not nil. -func (v *WorkflowExecutionInfo) IsSetRetryExpirationSeconds() bool { - return v != nil && v.RetryExpirationSeconds != nil +// IsSetMemo returns true if Memo is not nil. +func (v *WorkflowExecutionInfo) IsSetMemo() bool { + return v != nil && v.Memo != nil } -// GetRetryBackoffCoefficient returns the value of RetryBackoffCoefficient if it is set or its +// GetVersionHistories returns the value of VersionHistories if it is set or its // zero value if it is unset. -func (v *WorkflowExecutionInfo) GetRetryBackoffCoefficient() (o float64) { - if v != nil && v.RetryBackoffCoefficient != nil { - return *v.RetryBackoffCoefficient +func (v *WorkflowExecutionInfo) GetVersionHistories() (o []byte) { + if v != nil && v.VersionHistories != nil { + return v.VersionHistories } return } -// IsSetRetryBackoffCoefficient returns true if RetryBackoffCoefficient is not nil. -func (v *WorkflowExecutionInfo) IsSetRetryBackoffCoefficient() bool { - return v != nil && v.RetryBackoffCoefficient != nil +// IsSetVersionHistories returns true if VersionHistories is not nil. +func (v *WorkflowExecutionInfo) IsSetVersionHistories() bool { + return v != nil && v.VersionHistories != nil } -// GetRetryExpirationTimeNanos returns the value of RetryExpirationTimeNanos if it is set or its +// GetVersionHistoriesEncoding returns the value of VersionHistoriesEncoding if it is set or its // zero value if it is unset. -func (v *WorkflowExecutionInfo) GetRetryExpirationTimeNanos() (o int64) { - if v != nil && v.RetryExpirationTimeNanos != nil { - return *v.RetryExpirationTimeNanos +func (v *WorkflowExecutionInfo) GetVersionHistoriesEncoding() (o string) { + if v != nil && v.VersionHistoriesEncoding != nil { + return *v.VersionHistoriesEncoding } return } -// IsSetRetryExpirationTimeNanos returns true if RetryExpirationTimeNanos is not nil. -func (v *WorkflowExecutionInfo) IsSetRetryExpirationTimeNanos() bool { - return v != nil && v.RetryExpirationTimeNanos != nil +// IsSetVersionHistoriesEncoding returns true if VersionHistoriesEncoding is not nil. +func (v *WorkflowExecutionInfo) IsSetVersionHistoriesEncoding() bool { + return v != nil && v.VersionHistoriesEncoding != nil } -// GetRetryNonRetryableErrors returns the value of RetryNonRetryableErrors if it is set or its +// GetFirstExecutionRunID returns the value of FirstExecutionRunID if it is set or its // zero value if it is unset. 
-func (v *WorkflowExecutionInfo) GetRetryNonRetryableErrors() (o []string) { - if v != nil && v.RetryNonRetryableErrors != nil { - return v.RetryNonRetryableErrors +func (v *WorkflowExecutionInfo) GetFirstExecutionRunID() (o []byte) { + if v != nil && v.FirstExecutionRunID != nil { + return v.FirstExecutionRunID } return } -// IsSetRetryNonRetryableErrors returns true if RetryNonRetryableErrors is not nil. -func (v *WorkflowExecutionInfo) IsSetRetryNonRetryableErrors() bool { - return v != nil && v.RetryNonRetryableErrors != nil +// IsSetFirstExecutionRunID returns true if FirstExecutionRunID is not nil. +func (v *WorkflowExecutionInfo) IsSetFirstExecutionRunID() bool { + return v != nil && v.FirstExecutionRunID != nil } -// GetHasRetryPolicy returns the value of HasRetryPolicy if it is set or its +// GetPartitionConfig returns the value of PartitionConfig if it is set or its // zero value if it is unset. -func (v *WorkflowExecutionInfo) GetHasRetryPolicy() (o bool) { - if v != nil && v.HasRetryPolicy != nil { - return *v.HasRetryPolicy +func (v *WorkflowExecutionInfo) GetPartitionConfig() (o map[string]string) { + if v != nil && v.PartitionConfig != nil { + return v.PartitionConfig } return } -// IsSetHasRetryPolicy returns true if HasRetryPolicy is not nil. -func (v *WorkflowExecutionInfo) IsSetHasRetryPolicy() bool { - return v != nil && v.HasRetryPolicy != nil +// IsSetPartitionConfig returns true if PartitionConfig is not nil. +func (v *WorkflowExecutionInfo) IsSetPartitionConfig() bool { + return v != nil && v.PartitionConfig != nil } -// GetCronSchedule returns the value of CronSchedule if it is set or its +// GetChecksum returns the value of Checksum if it is set or its // zero value if it is unset. -func (v *WorkflowExecutionInfo) GetCronSchedule() (o string) { - if v != nil && v.CronSchedule != nil { - return *v.CronSchedule +func (v *WorkflowExecutionInfo) GetChecksum() (o []byte) { + if v != nil && v.Checksum != nil { + return v.Checksum } return } -// IsSetCronSchedule returns true if CronSchedule is not nil. -func (v *WorkflowExecutionInfo) IsSetCronSchedule() bool { - return v != nil && v.CronSchedule != nil +// IsSetChecksum returns true if Checksum is not nil. +func (v *WorkflowExecutionInfo) IsSetChecksum() bool { + return v != nil && v.Checksum != nil } -// GetEventStoreVersion returns the value of EventStoreVersion if it is set or its +// GetChecksumEncoding returns the value of ChecksumEncoding if it is set or its // zero value if it is unset. -func (v *WorkflowExecutionInfo) GetEventStoreVersion() (o int32) { - if v != nil && v.EventStoreVersion != nil { - return *v.EventStoreVersion +func (v *WorkflowExecutionInfo) GetChecksumEncoding() (o string) { + if v != nil && v.ChecksumEncoding != nil { + return *v.ChecksumEncoding } return } -// IsSetEventStoreVersion returns true if EventStoreVersion is not nil. -func (v *WorkflowExecutionInfo) IsSetEventStoreVersion() bool { - return v != nil && v.EventStoreVersion != nil +// IsSetChecksumEncoding returns true if ChecksumEncoding is not nil. +func (v *WorkflowExecutionInfo) IsSetChecksumEncoding() bool { + return v != nil && v.ChecksumEncoding != nil } -// GetEventBranchToken returns the value of EventBranchToken if it is set or its +// GetCronOverlapPolicy returns the value of CronOverlapPolicy if it is set or its // zero value if it is unset. 
-func (v *WorkflowExecutionInfo) GetEventBranchToken() (o []byte) { - if v != nil && v.EventBranchToken != nil { - return v.EventBranchToken +func (v *WorkflowExecutionInfo) GetCronOverlapPolicy() (o shared.CronOverlapPolicy) { + if v != nil && v.CronOverlapPolicy != nil { + return *v.CronOverlapPolicy } return } -// IsSetEventBranchToken returns true if EventBranchToken is not nil. -func (v *WorkflowExecutionInfo) IsSetEventBranchToken() bool { - return v != nil && v.EventBranchToken != nil +// IsSetCronOverlapPolicy returns true if CronOverlapPolicy is not nil. +func (v *WorkflowExecutionInfo) IsSetCronOverlapPolicy() bool { + return v != nil && v.CronOverlapPolicy != nil } -// GetSignalCount returns the value of SignalCount if it is set or its +// GetActiveClusterSelectionPolicy returns the value of ActiveClusterSelectionPolicy if it is set or its // zero value if it is unset. -func (v *WorkflowExecutionInfo) GetSignalCount() (o int64) { - if v != nil && v.SignalCount != nil { - return *v.SignalCount +func (v *WorkflowExecutionInfo) GetActiveClusterSelectionPolicy() (o []byte) { + if v != nil && v.ActiveClusterSelectionPolicy != nil { + return v.ActiveClusterSelectionPolicy } return } -// IsSetSignalCount returns true if SignalCount is not nil. -func (v *WorkflowExecutionInfo) IsSetSignalCount() bool { - return v != nil && v.SignalCount != nil +// IsSetActiveClusterSelectionPolicy returns true if ActiveClusterSelectionPolicy is not nil. +func (v *WorkflowExecutionInfo) IsSetActiveClusterSelectionPolicy() bool { + return v != nil && v.ActiveClusterSelectionPolicy != nil } -// GetHistorySize returns the value of HistorySize if it is set or its +// GetActiveClusterSelectionPolicyEncoding returns the value of ActiveClusterSelectionPolicyEncoding if it is set or its // zero value if it is unset. -func (v *WorkflowExecutionInfo) GetHistorySize() (o int64) { - if v != nil && v.HistorySize != nil { - return *v.HistorySize +func (v *WorkflowExecutionInfo) GetActiveClusterSelectionPolicyEncoding() (o string) { + if v != nil && v.ActiveClusterSelectionPolicyEncoding != nil { + return *v.ActiveClusterSelectionPolicyEncoding } return } -// IsSetHistorySize returns true if HistorySize is not nil. -func (v *WorkflowExecutionInfo) IsSetHistorySize() bool { - return v != nil && v.HistorySize != nil +// IsSetActiveClusterSelectionPolicyEncoding returns true if ActiveClusterSelectionPolicyEncoding is not nil. +func (v *WorkflowExecutionInfo) IsSetActiveClusterSelectionPolicyEncoding() bool { + return v != nil && v.ActiveClusterSelectionPolicyEncoding != nil } -// GetClientLibraryVersion returns the value of ClientLibraryVersion if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetClientLibraryVersion() (o string) { - if v != nil && v.ClientLibraryVersion != nil { - return *v.ClientLibraryVersion +type WorkflowTimerTaskInfo struct { + References []*TimerReference `json:"references,omitempty"` +} + +type _List_TimerReference_ValueList []*TimerReference + +func (v _List_TimerReference_ValueList) ForEach(f func(wire.Value) error) error { + for i, x := range v { + if x == nil { + return fmt.Errorf("invalid list '[]*TimerReference', index [%v]: value is nil", i) + } + w, err := x.ToWire() + if err != nil { + return err + } + err = f(w) + if err != nil { + return err + } } + return nil +} - return +func (v _List_TimerReference_ValueList) Size() int { + return len(v) } -// IsSetClientLibraryVersion returns true if ClientLibraryVersion is not nil. 
-func (v *WorkflowExecutionInfo) IsSetClientLibraryVersion() bool { - return v != nil && v.ClientLibraryVersion != nil +func (_List_TimerReference_ValueList) ValueType() wire.Type { + return wire.TStruct } -// GetClientFeatureVersion returns the value of ClientFeatureVersion if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetClientFeatureVersion() (o string) { - if v != nil && v.ClientFeatureVersion != nil { - return *v.ClientFeatureVersion +func (_List_TimerReference_ValueList) Close() {} + +// ToWire translates a WorkflowTimerTaskInfo struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *WorkflowTimerTaskInfo) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.References != nil { + w, err = wire.NewValueList(_List_TimerReference_ValueList(v.References)), error(nil) + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 10, Value: w} + i++ } - return + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } -// IsSetClientFeatureVersion returns true if ClientFeatureVersion is not nil. -func (v *WorkflowExecutionInfo) IsSetClientFeatureVersion() bool { - return v != nil && v.ClientFeatureVersion != nil +func _TimerReference_Read(w wire.Value) (*TimerReference, error) { + var v TimerReference + err := v.FromWire(w) + return &v, err } -// GetClientImpl returns the value of ClientImpl if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetClientImpl() (o string) { - if v != nil && v.ClientImpl != nil { - return *v.ClientImpl +func _List_TimerReference_Read(l wire.ValueList) ([]*TimerReference, error) { + if l.ValueType() != wire.TStruct { + return nil, nil } - return + o := make([]*TimerReference, 0, l.Size()) + err := l.ForEach(func(x wire.Value) error { + i, err := _TimerReference_Read(x) + if err != nil { + return err + } + o = append(o, i) + return nil + }) + l.Close() + return o, err } -// IsSetClientImpl returns true if ClientImpl is not nil. -func (v *WorkflowExecutionInfo) IsSetClientImpl() bool { - return v != nil && v.ClientImpl != nil -} +// FromWire deserializes a WorkflowTimerTaskInfo struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a WorkflowTimerTaskInfo struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v WorkflowTimerTaskInfo +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *WorkflowTimerTaskInfo) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 10: + if field.Value.Type() == wire.TList { + v.References, err = _List_TimerReference_Read(field.Value.GetList()) + if err != nil { + return err + } -// GetAutoResetPoints returns the value of AutoResetPoints if it is set or its -// zero value if it is unset. 
-func (v *WorkflowExecutionInfo) GetAutoResetPoints() (o []byte) { - if v != nil && v.AutoResetPoints != nil { - return v.AutoResetPoints + } + } } - return + return nil } -// IsSetAutoResetPoints returns true if AutoResetPoints is not nil. -func (v *WorkflowExecutionInfo) IsSetAutoResetPoints() bool { - return v != nil && v.AutoResetPoints != nil -} +func _List_TimerReference_Encode(val []*TimerReference, sw stream.Writer) error { -// GetAutoResetPointsEncoding returns the value of AutoResetPointsEncoding if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetAutoResetPointsEncoding() (o string) { - if v != nil && v.AutoResetPointsEncoding != nil { - return *v.AutoResetPointsEncoding + lh := stream.ListHeader{ + Type: wire.TStruct, + Length: len(val), + } + if err := sw.WriteListBegin(lh); err != nil { + return err } - return + for i, v := range val { + if v == nil { + return fmt.Errorf("invalid list '[]*TimerReference', index [%v]: value is nil", i) + } + if err := v.Encode(sw); err != nil { + return err + } + } + return sw.WriteListEnd() } -// IsSetAutoResetPointsEncoding returns true if AutoResetPointsEncoding is not nil. -func (v *WorkflowExecutionInfo) IsSetAutoResetPointsEncoding() bool { - return v != nil && v.AutoResetPointsEncoding != nil -} +// Encode serializes a WorkflowTimerTaskInfo struct directly into bytes, without going +// through an intermediary type. +// +// An error is returned if a WorkflowTimerTaskInfo struct could not be encoded. +func (v *WorkflowTimerTaskInfo) Encode(sw stream.Writer) error { + if err := sw.WriteStructBegin(); err != nil { + return err + } -// GetSearchAttributes returns the value of SearchAttributes if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetSearchAttributes() (o map[string][]byte) { - if v != nil && v.SearchAttributes != nil { - return v.SearchAttributes + if v.References != nil { + if err := sw.WriteFieldBegin(stream.FieldHeader{ID: 10, Type: wire.TList}); err != nil { + return err + } + if err := _List_TimerReference_Encode(v.References, sw); err != nil { + return err + } + if err := sw.WriteFieldEnd(); err != nil { + return err + } } - return + return sw.WriteStructEnd() } -// IsSetSearchAttributes returns true if SearchAttributes is not nil. -func (v *WorkflowExecutionInfo) IsSetSearchAttributes() bool { - return v != nil && v.SearchAttributes != nil +func _TimerReference_Decode(sr stream.Reader) (*TimerReference, error) { + var v TimerReference + err := v.Decode(sr) + return &v, err } -// GetMemo returns the value of Memo if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetMemo() (o map[string][]byte) { - if v != nil && v.Memo != nil { - return v.Memo +func _List_TimerReference_Decode(sr stream.Reader) ([]*TimerReference, error) { + lh, err := sr.ReadListBegin() + if err != nil { + return nil, err } - return -} - -// IsSetMemo returns true if Memo is not nil. -func (v *WorkflowExecutionInfo) IsSetMemo() bool { - return v != nil && v.Memo != nil -} + if lh.Type != wire.TStruct { + for i := 0; i < lh.Length; i++ { + if err := sr.Skip(lh.Type); err != nil { + return nil, err + } + } + return nil, sr.ReadListEnd() + } -// GetVersionHistories returns the value of VersionHistories if it is set or its -// zero value if it is unset. 
-func (v *WorkflowExecutionInfo) GetVersionHistories() (o []byte) { - if v != nil && v.VersionHistories != nil { - return v.VersionHistories + o := make([]*TimerReference, 0, lh.Length) + for i := 0; i < lh.Length; i++ { + v, err := _TimerReference_Decode(sr) + if err != nil { + return nil, err + } + o = append(o, v) } - return + if err = sr.ReadListEnd(); err != nil { + return nil, err + } + return o, err } -// IsSetVersionHistories returns true if VersionHistories is not nil. -func (v *WorkflowExecutionInfo) IsSetVersionHistories() bool { - return v != nil && v.VersionHistories != nil -} +// Decode deserializes a WorkflowTimerTaskInfo struct directly from its Thrift-level +// representation, without going through an intemediary type. +// +// An error is returned if a WorkflowTimerTaskInfo struct could not be generated from the wire +// representation. +func (v *WorkflowTimerTaskInfo) Decode(sr stream.Reader) error { -// GetVersionHistoriesEncoding returns the value of VersionHistoriesEncoding if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetVersionHistoriesEncoding() (o string) { - if v != nil && v.VersionHistoriesEncoding != nil { - return *v.VersionHistoriesEncoding + if err := sr.ReadStructBegin(); err != nil { + return err } - return -} - -// IsSetVersionHistoriesEncoding returns true if VersionHistoriesEncoding is not nil. -func (v *WorkflowExecutionInfo) IsSetVersionHistoriesEncoding() bool { - return v != nil && v.VersionHistoriesEncoding != nil -} - -// GetFirstExecutionRunID returns the value of FirstExecutionRunID if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetFirstExecutionRunID() (o []byte) { - if v != nil && v.FirstExecutionRunID != nil { - return v.FirstExecutionRunID + fh, ok, err := sr.ReadFieldBegin() + if err != nil { + return err } - return -} + for ok { + switch { + case fh.ID == 10 && fh.Type == wire.TList: + v.References, err = _List_TimerReference_Decode(sr) + if err != nil { + return err + } -// IsSetFirstExecutionRunID returns true if FirstExecutionRunID is not nil. -func (v *WorkflowExecutionInfo) IsSetFirstExecutionRunID() bool { - return v != nil && v.FirstExecutionRunID != nil -} + default: + if err := sr.Skip(fh.Type); err != nil { + return err + } + } -// GetPartitionConfig returns the value of PartitionConfig if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetPartitionConfig() (o map[string]string) { - if v != nil && v.PartitionConfig != nil { - return v.PartitionConfig + if err := sr.ReadFieldEnd(); err != nil { + return err + } + + if fh, ok, err = sr.ReadFieldBegin(); err != nil { + return err + } } - return -} + if err := sr.ReadStructEnd(); err != nil { + return err + } -// IsSetPartitionConfig returns true if PartitionConfig is not nil. -func (v *WorkflowExecutionInfo) IsSetPartitionConfig() bool { - return v != nil && v.PartitionConfig != nil + return nil } -// GetChecksum returns the value of Checksum if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetChecksum() (o []byte) { - if v != nil && v.Checksum != nil { - return v.Checksum +// String returns a readable string representation of a WorkflowTimerTaskInfo +// struct. +func (v *WorkflowTimerTaskInfo) String() string { + if v == nil { + return "" } - return -} + var fields [1]string + i := 0 + if v.References != nil { + fields[i] = fmt.Sprintf("References: %v", v.References) + i++ + } -// IsSetChecksum returns true if Checksum is not nil. 
-func (v *WorkflowExecutionInfo) IsSetChecksum() bool { - return v != nil && v.Checksum != nil + return fmt.Sprintf("WorkflowTimerTaskInfo{%v}", strings.Join(fields[:i], ", ")) } -// GetChecksumEncoding returns the value of ChecksumEncoding if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetChecksumEncoding() (o string) { - if v != nil && v.ChecksumEncoding != nil { - return *v.ChecksumEncoding +func _List_TimerReference_Equals(lhs, rhs []*TimerReference) bool { + if len(lhs) != len(rhs) { + return false } - return -} + for i, lv := range lhs { + rv := rhs[i] + if !lv.Equals(rv) { + return false + } + } -// IsSetChecksumEncoding returns true if ChecksumEncoding is not nil. -func (v *WorkflowExecutionInfo) IsSetChecksumEncoding() bool { - return v != nil && v.ChecksumEncoding != nil + return true } -// GetCronOverlapPolicy returns the value of CronOverlapPolicy if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetCronOverlapPolicy() (o shared.CronOverlapPolicy) { - if v != nil && v.CronOverlapPolicy != nil { - return *v.CronOverlapPolicy +// Equals returns true if all the fields of this WorkflowTimerTaskInfo match the +// provided WorkflowTimerTaskInfo. +// +// This function performs a deep comparison. +func (v *WorkflowTimerTaskInfo) Equals(rhs *WorkflowTimerTaskInfo) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.References == nil && rhs.References == nil) || (v.References != nil && rhs.References != nil && _List_TimerReference_Equals(v.References, rhs.References))) { + return false } - return + return true } -// IsSetCronOverlapPolicy returns true if CronOverlapPolicy is not nil. -func (v *WorkflowExecutionInfo) IsSetCronOverlapPolicy() bool { - return v != nil && v.CronOverlapPolicy != nil -} +type _List_TimerReference_Zapper []*TimerReference -// GetActiveClusterSelectionPolicy returns the value of ActiveClusterSelectionPolicy if it is set or its -// zero value if it is unset. -func (v *WorkflowExecutionInfo) GetActiveClusterSelectionPolicy() (o []byte) { - if v != nil && v.ActiveClusterSelectionPolicy != nil { - return v.ActiveClusterSelectionPolicy +// MarshalLogArray implements zapcore.ArrayMarshaler, enabling +// fast logging of _List_TimerReference_Zapper. +func (l _List_TimerReference_Zapper) MarshalLogArray(enc zapcore.ArrayEncoder) (err error) { + for _, v := range l { + err = multierr.Append(err, enc.AppendObject(v)) } - - return + return err } -// IsSetActiveClusterSelectionPolicy returns true if ActiveClusterSelectionPolicy is not nil. -func (v *WorkflowExecutionInfo) IsSetActiveClusterSelectionPolicy() bool { - return v != nil && v.ActiveClusterSelectionPolicy != nil +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of WorkflowTimerTaskInfo. +func (v *WorkflowTimerTaskInfo) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.References != nil { + err = multierr.Append(err, enc.AddArray("references", (_List_TimerReference_Zapper)(v.References))) + } + return err } -// GetActiveClusterSelectionPolicyEncoding returns the value of ActiveClusterSelectionPolicyEncoding if it is set or its +// GetReferences returns the value of References if it is set or its // zero value if it is unset. 
-func (v *WorkflowExecutionInfo) GetActiveClusterSelectionPolicyEncoding() (o string) { - if v != nil && v.ActiveClusterSelectionPolicyEncoding != nil { - return *v.ActiveClusterSelectionPolicyEncoding +func (v *WorkflowTimerTaskInfo) GetReferences() (o []*TimerReference) { + if v != nil && v.References != nil { + return v.References } return } -// IsSetActiveClusterSelectionPolicyEncoding returns true if ActiveClusterSelectionPolicyEncoding is not nil. -func (v *WorkflowExecutionInfo) IsSetActiveClusterSelectionPolicyEncoding() bool { - return v != nil && v.ActiveClusterSelectionPolicyEncoding != nil +// IsSetReferences returns true if References is not nil. +func (v *WorkflowTimerTaskInfo) IsSetReferences() bool { + return v != nil && v.References != nil } // ThriftModule represents the IDL file used to generate this package. @@ -18827,11 +19507,11 @@ var ThriftModule = &thriftreflect.ThriftModule{ Name: "sqlblobs", Package: "github.com/uber/cadence/.gen/go/sqlblobs", FilePath: "sqlblobs.thrift", - SHA1: "0dff88f83f8da7964b56924522e5106c27bd242e", + SHA1: "e3cf1b490ecd18870cf52181334057127f8bee18", Includes: []*thriftreflect.ThriftModule{ shared.ThriftModule, }, Raw: rawIDL, } -const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.sqlblobs\n\ninclude \"shared.thrift\"\n\nstruct ShardInfo {\n 10: optional i32 stolenSinceRenew\n 12: optional i64 (js.type = \"Long\") updatedAtNanos\n 14: optional i64 (js.type = \"Long\") replicationAckLevel\n 16: optional i64 (js.type = \"Long\") transferAckLevel\n 18: optional i64 (js.type = \"Long\") timerAckLevelNanos\n 24: optional i64 (js.type = \"Long\") domainNotificationVersion\n 34: optional map clusterTransferAckLevel\n 36: optional map clusterTimerAckLevel\n 38: optional string owner\n 40: optional map clusterReplicationLevel\n 42: optional binary pendingFailoverMarkers\n 44: optional string pendingFailoverMarkersEncoding\n 46: optional map replicationDlqAckLevel\n 50: optional binary transferProcessingQueueStates\n 51: optional string transferProcessingQueueStatesEncoding\n 55: optional binary timerProcessingQueueStates\n 56: optional string timerProcessingQueueStatesEncoding\n 60: optional binary crossClusterProcessingQueueStates\n 61: optional string crossClusterProcessingQueueStatesEncoding\n 64: optional map queueStates\n}\n\nstruct DomainInfo {\n 10: optional string name\n 12: optional string description\n 14: optional string owner\n 16: optional i32 status\n 18: optional i16 retentionDays\n 20: optional bool emitMetric\n 22: optional string archivalBucket\n 24: optional i16 archivalStatus\n 26: optional i64 (js.type = \"Long\") configVersion\n 28: optional i64 (js.type = \"Long\") notificationVersion\n 30: optional i64 (js.type = \"Long\") failoverNotificationVersion\n 32: optional i64 (js.type = \"Long\") failoverVersion\n 34: optional string activeClusterName\n 36: optional list clusters\n 38: optional map data\n 39: optional binary badBinaries\n 40: optional string badBinariesEncoding\n 42: optional i16 historyArchivalStatus\n 44: optional string historyArchivalURI\n 46: optional i16 visibilityArchivalStatus\n 48: optional string visibilityArchivalURI\n 50: optional i64 (js.type = \"Long\") failoverEndTime\n 52: optional i64 (js.type = \"Long\") previousFailoverVersion\n 54: optional i64 (js.type = \"Long\") lastUpdatedTime\n 56: optional binary isolationGroupsConfiguration\n 58: optional string isolationGroupsConfigurationEncoding\n 60: optional binary asyncWorkflowConfiguration\n 62: optional string asyncWorkflowConfigurationEncoding\n 64: optional binary activeClustersConfiguration\n 66: optional string activeClustersConfigurationEncoding\n}\n\nstruct HistoryTreeInfo {\n 10: optional i64 (js.type = \"Long\") createdTimeNanos // For fork operation to prevent race condition of leaking event data when forking branches fail. 
Also can be used for clean up leaked data\n 12: optional list ancestors\n 14: optional string info // For lookup back to workflow during debugging, also background cleanup when fork operation cannot finish self cleanup due to crash.\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional binary parentDomainID\n 12: optional string parentWorkflowID\n 14: optional binary parentRunID\n 16: optional i64 (js.type = \"Long\") initiatedID\n 18: optional i64 (js.type = \"Long\") completionEventBatchID\n 20: optional binary completionEvent\n 22: optional string completionEventEncoding\n 24: optional string taskList\n 25: optional shared.TaskListKind taskListKind\n 26: optional string workflowTypeName\n 28: optional i32 workflowTimeoutSeconds\n 30: optional i32 decisionTaskTimeoutSeconds\n 32: optional binary executionContext\n 34: optional i32 state\n 36: optional i32 closeStatus\n 38: optional i64 (js.type = \"Long\") startVersion\n 44: optional i64 (js.type = \"Long\") lastWriteEventID\n 48: optional i64 (js.type = \"Long\") lastEventTaskID\n 50: optional i64 (js.type = \"Long\") lastFirstEventID\n 52: optional i64 (js.type = \"Long\") lastProcessedEvent\n 54: optional i64 (js.type = \"Long\") startTimeNanos\n 56: optional i64 (js.type = \"Long\") lastUpdatedTimeNanos\n 58: optional i64 (js.type = \"Long\") decisionVersion\n 60: optional i64 (js.type = \"Long\") decisionScheduleID\n 62: optional i64 (js.type = \"Long\") decisionStartedID\n 64: optional i32 decisionTimeout\n 66: optional i64 (js.type = \"Long\") decisionAttempt\n 68: optional i64 (js.type = \"Long\") decisionStartedTimestampNanos\n 69: optional i64 (js.type = \"Long\") decisionScheduledTimestampNanos\n 70: optional bool cancelRequested\n 71: optional i64 (js.type = \"Long\") decisionOriginalScheduledTimestampNanos\n 72: optional string createRequestID\n 74: optional string decisionRequestID\n 76: optional string cancelRequestID\n 78: optional string stickyTaskList\n 80: optional i64 (js.type = \"Long\") stickyScheduleToStartTimeout\n 82: optional i64 (js.type = \"Long\") retryAttempt\n 84: optional i32 retryInitialIntervalSeconds\n 86: optional i32 retryMaximumIntervalSeconds\n 88: optional i32 retryMaximumAttempts\n 90: optional i32 retryExpirationSeconds\n 92: optional double retryBackoffCoefficient\n 94: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 96: optional list retryNonRetryableErrors\n 98: optional bool hasRetryPolicy\n 100: optional string cronSchedule\n 102: optional i32 eventStoreVersion\n 104: optional binary eventBranchToken\n 106: optional i64 (js.type = \"Long\") signalCount\n 108: optional i64 (js.type = \"Long\") historySize\n 110: optional string clientLibraryVersion\n 112: optional string clientFeatureVersion\n 114: optional string clientImpl\n 115: optional binary autoResetPoints\n 116: optional string autoResetPointsEncoding\n 118: optional map searchAttributes\n 120: optional map memo\n 122: optional binary versionHistories\n 124: optional string versionHistoriesEncoding\n 126: optional binary firstExecutionRunID\n 128: optional map partitionConfig\n 130: optional binary checksum\n 132: optional string checksumEncoding\n 134: optional shared.CronOverlapPolicy cronOverlapPolicy\n 137: optional binary activeClusterSelectionPolicy\n 138: optional string activeClusterSelectionPolicyEncoding\n}\n\nstruct ActivityInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") scheduledEventBatchID\n 14: optional binary scheduledEvent\n 16: optional string 
scheduledEventEncoding\n 18: optional i64 (js.type = \"Long\") scheduledTimeNanos\n 20: optional i64 (js.type = \"Long\") startedID\n 22: optional binary startedEvent\n 24: optional string startedEventEncoding\n 26: optional i64 (js.type = \"Long\") startedTimeNanos\n 28: optional string activityID\n 30: optional string requestID\n 32: optional i32 scheduleToStartTimeoutSeconds\n 34: optional i32 scheduleToCloseTimeoutSeconds\n 36: optional i32 startToCloseTimeoutSeconds\n 38: optional i32 heartbeatTimeoutSeconds\n 40: optional bool cancelRequested\n 42: optional i64 (js.type = \"Long\") cancelRequestID\n 44: optional i32 timerTaskStatus\n 46: optional i32 attempt\n 48: optional string taskList\n 49: optional shared.TaskListKind taskListKind\n 50: optional string startedIdentity\n 52: optional bool hasRetryPolicy\n 54: optional i32 retryInitialIntervalSeconds\n 56: optional i32 retryMaximumIntervalSeconds\n 58: optional i32 retryMaximumAttempts\n 60: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 62: optional double retryBackoffCoefficient\n 64: optional list retryNonRetryableErrors\n 66: optional string retryLastFailureReason\n 68: optional string retryLastWorkerIdentity\n 70: optional binary retryLastFailureDetails\n}\n\nstruct ChildExecutionInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 14: optional i64 (js.type = \"Long\") startedID\n 16: optional binary initiatedEvent\n 18: optional string initiatedEventEncoding\n 20: optional string startedWorkflowID\n 22: optional binary startedRunID\n 24: optional binary startedEvent\n 26: optional string startedEventEncoding\n 28: optional string createRequestID\n 29: optional string domainID\n 30: optional string domainName // deprecated\n 32: optional string workflowTypeName\n 35: optional i32 parentClosePolicy\n}\n\nstruct SignalInfo {\n 10: optional i64 (js.type = \"Long\") version\n 11: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 12: optional string requestID\n 14: optional string name\n 16: optional binary input\n 18: optional binary control\n}\n\nstruct RequestCancelInfo {\n 10: optional i64 (js.type = \"Long\") version\n 11: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 12: optional string cancelRequestID\n}\n\nstruct TimerInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") startedID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n // TaskID is a misleading variable, it actually serves\n // the purpose of indicating whether a timer task is\n // generated for this timer info\n 16: optional i64 (js.type = \"Long\") taskID\n}\n\nstruct TaskInfo {\n 10: optional string workflowID\n 12: optional binary runID\n 13: optional i64 (js.type = \"Long\") scheduleID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 15: optional i64 (js.type = \"Long\") createdTimeNanos\n 17: optional map partitionConfig\n}\n\nstruct TaskListPartition {\n 10: optional list isolationGroups\n}\n\nstruct TaskListPartitionConfig {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i32 numReadPartitions\n 14: optional i32 numWritePartitions\n 16: optional map readPartitions\n 18: optional map writePartitions\n}\n\nstruct TaskListInfo {\n 10: optional i16 kind // {Normal, Sticky}\n 12: optional i64 (js.type = \"Long\") ackLevel\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 16: optional i64 (js.type = \"Long\") lastUpdatedNanos\n 18: optional TaskListPartitionConfig 
adaptivePartitionConfig\n}\n\nstruct TransferTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional binary targetDomainID\n 20: optional string targetWorkflowID\n 22: optional binary targetRunID\n 24: optional string taskList\n 26: optional bool targetChildWorkflowOnly\n 28: optional i64 (js.type = \"Long\") scheduleID\n 30: optional i64 (js.type = \"Long\") version\n 32: optional i64 (js.type = \"Long\") visibilityTimestampNanos\n 34: optional set targetDomainIDs\n}\n\nstruct TimerTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i16 timeoutType\n 20: optional i64 (js.type = \"Long\") version\n 22: optional i64 (js.type = \"Long\") scheduleAttempt\n 24: optional i64 (js.type = \"Long\") eventID\n}\n\nstruct ReplicationTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i64 (js.type = \"Long\") version\n 20: optional i64 (js.type = \"Long\") firstEventID\n 22: optional i64 (js.type = \"Long\") nextEventID\n 24: optional i64 (js.type = \"Long\") scheduledID\n 26: optional i32 eventStoreVersion\n 28: optional i32 newRunEventStoreVersion\n 30: optional binary branch_token\n 34: optional binary newRunBranchToken\n 38: optional i64 (js.type = \"Long\") creationTime\n}\n\nenum AsyncRequestType {\n StartWorkflowExecutionAsyncRequest\n SignalWithStartWorkflowExecutionAsyncRequest\n}\n\nstruct AsyncRequestMessage {\n 10: optional string partitionKey\n 12: optional AsyncRequestType type\n 14: optional shared.Header header\n 16: optional string encoding\n 18: optional binary payload\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\nnamespace java com.uber.cadence.sqlblobs\n\ninclude \"shared.thrift\"\n\nstruct ShardInfo {\n 10: optional i32 stolenSinceRenew\n 12: optional i64 (js.type = \"Long\") updatedAtNanos\n 14: optional i64 (js.type = \"Long\") replicationAckLevel\n 16: optional i64 (js.type = \"Long\") transferAckLevel\n 18: optional i64 (js.type = \"Long\") timerAckLevelNanos\n 24: optional i64 (js.type = \"Long\") domainNotificationVersion\n 34: optional map clusterTransferAckLevel\n 36: optional map clusterTimerAckLevel\n 38: optional string owner\n 40: optional map clusterReplicationLevel\n 42: optional binary pendingFailoverMarkers\n 44: optional string pendingFailoverMarkersEncoding\n 46: optional map replicationDlqAckLevel\n 50: optional binary transferProcessingQueueStates\n 51: optional string transferProcessingQueueStatesEncoding\n 55: optional binary timerProcessingQueueStates\n 56: optional string timerProcessingQueueStatesEncoding\n 60: optional binary crossClusterProcessingQueueStates\n 61: optional string crossClusterProcessingQueueStatesEncoding\n 64: optional map queueStates\n}\n\nstruct DomainInfo {\n 10: optional string name\n 12: optional string description\n 14: optional string owner\n 16: optional i32 status\n 18: optional i16 retentionDays\n 20: optional bool emitMetric\n 22: optional string archivalBucket\n 24: optional i16 archivalStatus\n 26: optional i64 (js.type = \"Long\") configVersion\n 28: optional i64 (js.type = \"Long\") notificationVersion\n 30: optional i64 (js.type = \"Long\") failoverNotificationVersion\n 32: optional i64 (js.type = \"Long\") failoverVersion\n 34: optional string activeClusterName\n 36: optional list clusters\n 38: optional map data\n 39: optional binary badBinaries\n 40: optional string badBinariesEncoding\n 42: optional i16 historyArchivalStatus\n 44: optional string historyArchivalURI\n 46: optional i16 visibilityArchivalStatus\n 48: optional string visibilityArchivalURI\n 50: optional i64 (js.type = \"Long\") failoverEndTime\n 52: optional i64 (js.type = \"Long\") previousFailoverVersion\n 54: optional i64 (js.type = \"Long\") lastUpdatedTime\n 56: optional binary isolationGroupsConfiguration\n 58: optional string isolationGroupsConfigurationEncoding\n 60: optional binary asyncWorkflowConfiguration\n 62: optional string asyncWorkflowConfigurationEncoding\n 64: optional binary activeClustersConfiguration\n 66: optional string activeClustersConfigurationEncoding\n}\n\nstruct HistoryTreeInfo {\n 10: optional i64 (js.type = \"Long\") createdTimeNanos // For fork operation to prevent race condition of leaking event data when forking branches fail. 
Also can be used for clean up leaked data\n 12: optional list ancestors\n 14: optional string info // For lookup back to workflow during debugging, also background cleanup when fork operation cannot finish self cleanup due to crash.\n}\n\nstruct WorkflowExecutionInfo {\n 10: optional binary parentDomainID\n 12: optional string parentWorkflowID\n 14: optional binary parentRunID\n 16: optional i64 (js.type = \"Long\") initiatedID\n 18: optional i64 (js.type = \"Long\") completionEventBatchID\n 20: optional binary completionEvent\n 22: optional string completionEventEncoding\n 24: optional string taskList\n 25: optional shared.TaskListKind taskListKind\n 26: optional string workflowTypeName\n 28: optional i32 workflowTimeoutSeconds\n 30: optional i32 decisionTaskTimeoutSeconds\n 32: optional binary executionContext\n 34: optional i32 state\n 36: optional i32 closeStatus\n 38: optional i64 (js.type = \"Long\") startVersion\n 44: optional i64 (js.type = \"Long\") lastWriteEventID\n 48: optional i64 (js.type = \"Long\") lastEventTaskID\n 50: optional i64 (js.type = \"Long\") lastFirstEventID\n 52: optional i64 (js.type = \"Long\") lastProcessedEvent\n 54: optional i64 (js.type = \"Long\") startTimeNanos\n 56: optional i64 (js.type = \"Long\") lastUpdatedTimeNanos\n 58: optional i64 (js.type = \"Long\") decisionVersion\n 60: optional i64 (js.type = \"Long\") decisionScheduleID\n 62: optional i64 (js.type = \"Long\") decisionStartedID\n 64: optional i32 decisionTimeout\n 66: optional i64 (js.type = \"Long\") decisionAttempt\n 68: optional i64 (js.type = \"Long\") decisionStartedTimestampNanos\n 69: optional i64 (js.type = \"Long\") decisionScheduledTimestampNanos\n 70: optional bool cancelRequested\n 71: optional i64 (js.type = \"Long\") decisionOriginalScheduledTimestampNanos\n 72: optional string createRequestID\n 74: optional string decisionRequestID\n 76: optional string cancelRequestID\n 78: optional string stickyTaskList\n 80: optional i64 (js.type = \"Long\") stickyScheduleToStartTimeout\n 82: optional i64 (js.type = \"Long\") retryAttempt\n 84: optional i32 retryInitialIntervalSeconds\n 86: optional i32 retryMaximumIntervalSeconds\n 88: optional i32 retryMaximumAttempts\n 90: optional i32 retryExpirationSeconds\n 92: optional double retryBackoffCoefficient\n 94: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 96: optional list retryNonRetryableErrors\n 98: optional bool hasRetryPolicy\n 100: optional string cronSchedule\n 102: optional i32 eventStoreVersion\n 104: optional binary eventBranchToken\n 106: optional i64 (js.type = \"Long\") signalCount\n 108: optional i64 (js.type = \"Long\") historySize\n 110: optional string clientLibraryVersion\n 112: optional string clientFeatureVersion\n 114: optional string clientImpl\n 115: optional binary autoResetPoints\n 116: optional string autoResetPointsEncoding\n 118: optional map searchAttributes\n 120: optional map memo\n 122: optional binary versionHistories\n 124: optional string versionHistoriesEncoding\n 126: optional binary firstExecutionRunID\n 128: optional map partitionConfig\n 130: optional binary checksum\n 132: optional string checksumEncoding\n 134: optional shared.CronOverlapPolicy cronOverlapPolicy\n 137: optional binary activeClusterSelectionPolicy\n 138: optional string activeClusterSelectionPolicyEncoding\n}\n\nstruct ActivityInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") scheduledEventBatchID\n 14: optional binary scheduledEvent\n 16: optional string 
scheduledEventEncoding\n 18: optional i64 (js.type = \"Long\") scheduledTimeNanos\n 20: optional i64 (js.type = \"Long\") startedID\n 22: optional binary startedEvent\n 24: optional string startedEventEncoding\n 26: optional i64 (js.type = \"Long\") startedTimeNanos\n 28: optional string activityID\n 30: optional string requestID\n 32: optional i32 scheduleToStartTimeoutSeconds\n 34: optional i32 scheduleToCloseTimeoutSeconds\n 36: optional i32 startToCloseTimeoutSeconds\n 38: optional i32 heartbeatTimeoutSeconds\n 40: optional bool cancelRequested\n 42: optional i64 (js.type = \"Long\") cancelRequestID\n 44: optional i32 timerTaskStatus\n 46: optional i32 attempt\n 48: optional string taskList\n 49: optional shared.TaskListKind taskListKind\n 50: optional string startedIdentity\n 52: optional bool hasRetryPolicy\n 54: optional i32 retryInitialIntervalSeconds\n 56: optional i32 retryMaximumIntervalSeconds\n 58: optional i32 retryMaximumAttempts\n 60: optional i64 (js.type = \"Long\") retryExpirationTimeNanos\n 62: optional double retryBackoffCoefficient\n 64: optional list retryNonRetryableErrors\n 66: optional string retryLastFailureReason\n 68: optional string retryLastWorkerIdentity\n 70: optional binary retryLastFailureDetails\n}\n\nstruct ChildExecutionInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 14: optional i64 (js.type = \"Long\") startedID\n 16: optional binary initiatedEvent\n 18: optional string initiatedEventEncoding\n 20: optional string startedWorkflowID\n 22: optional binary startedRunID\n 24: optional binary startedEvent\n 26: optional string startedEventEncoding\n 28: optional string createRequestID\n 29: optional string domainID\n 30: optional string domainName // deprecated\n 32: optional string workflowTypeName\n 35: optional i32 parentClosePolicy\n}\n\nstruct SignalInfo {\n 10: optional i64 (js.type = \"Long\") version\n 11: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 12: optional string requestID\n 14: optional string name\n 16: optional binary input\n 18: optional binary control\n}\n\nstruct RequestCancelInfo {\n 10: optional i64 (js.type = \"Long\") version\n 11: optional i64 (js.type = \"Long\") initiatedEventBatchID\n 12: optional string cancelRequestID\n}\n\nstruct TimerInfo {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i64 (js.type = \"Long\") startedID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n // TaskID is a misleading variable, it actually serves\n // the purpose of indicating whether a timer task is\n // generated for this timer info\n 16: optional i64 (js.type = \"Long\") taskID\n}\n\nstruct TaskInfo {\n 10: optional string workflowID\n 12: optional binary runID\n 13: optional i64 (js.type = \"Long\") scheduleID\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 15: optional i64 (js.type = \"Long\") createdTimeNanos\n 17: optional map partitionConfig\n}\n\nstruct TaskListPartition {\n 10: optional list isolationGroups\n}\n\nstruct TaskListPartitionConfig {\n 10: optional i64 (js.type = \"Long\") version\n 12: optional i32 numReadPartitions\n 14: optional i32 numWritePartitions\n 16: optional map readPartitions\n 18: optional map writePartitions\n}\n\nstruct TaskListInfo {\n 10: optional i16 kind // {Normal, Sticky}\n 12: optional i64 (js.type = \"Long\") ackLevel\n 14: optional i64 (js.type = \"Long\") expiryTimeNanos\n 16: optional i64 (js.type = \"Long\") lastUpdatedNanos\n 18: optional TaskListPartitionConfig 
adaptivePartitionConfig\n}\n\nstruct TransferTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional binary targetDomainID\n 20: optional string targetWorkflowID\n 22: optional binary targetRunID\n 24: optional string taskList\n 26: optional bool targetChildWorkflowOnly\n 28: optional i64 (js.type = \"Long\") scheduleID\n 30: optional i64 (js.type = \"Long\") version\n 32: optional i64 (js.type = \"Long\") visibilityTimestampNanos\n 34: optional set targetDomainIDs\n}\n\nstruct TimerTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i16 timeoutType\n 20: optional i64 (js.type = \"Long\") version\n 22: optional i64 (js.type = \"Long\") scheduleAttempt\n 24: optional i64 (js.type = \"Long\") eventID\n}\n\nstruct ReplicationTaskInfo {\n 10: optional binary domainID\n 12: optional string workflowID\n 14: optional binary runID\n 16: optional i16 taskType\n 18: optional i64 (js.type = \"Long\") version\n 20: optional i64 (js.type = \"Long\") firstEventID\n 22: optional i64 (js.type = \"Long\") nextEventID\n 24: optional i64 (js.type = \"Long\") scheduledID\n 26: optional i32 eventStoreVersion\n 28: optional i32 newRunEventStoreVersion\n 30: optional binary branch_token\n 34: optional binary newRunBranchToken\n 38: optional i64 (js.type = \"Long\") creationTime\n}\n\nenum AsyncRequestType {\n StartWorkflowExecutionAsyncRequest\n SignalWithStartWorkflowExecutionAsyncRequest\n}\n\nstruct AsyncRequestMessage {\n 10: optional string partitionKey\n 12: optional AsyncRequestType type\n 14: optional shared.Header header\n 16: optional string encoding\n 18: optional binary payload\n}\n\n// a substruct on the executions record which is intended to be used to track\n// timers and other records for debugging and cleanup\nstruct WorkflowTimerTaskInfo {\n 10: optional list references\n}\n\nstruct TimerReference {\n // Primary Keys. Always required\n // a reference to the the execution table task_id\n 10: optional i64 taskID\n // a reference to the execution table visibility_ts\n 11: optional i64 (js.type = \"Long\") visibilityTimestamp\n\n // Reference fields:\n // for workflow timer values, the type of timeout\n 13: optional i16 TimeoutType\n}\n" diff --git a/common/dynamicconfig/dynamicproperties/constants.go b/common/dynamicconfig/dynamicproperties/constants.go index f8ff1b600f7..4c703ef1120 100644 --- a/common/dynamicconfig/dynamicproperties/constants.go +++ b/common/dynamicconfig/dynamicproperties/constants.go @@ -1649,6 +1649,14 @@ const ( // Default value: true // Allowed filters: N/A EnableGRPCOutbound + // EnableExecutionInfoTracking is the key for enabling execution info tracking tasks on the mutable state. + // for the purpose of cleaning them up after workflow closed. 
This is a feature-flag with the intention + // of it being removed in the future and defaulted to true + // KeyName: system.enableExecutionInfoTracking + // Value type: Bool + // Default value: false + // Allowed filters: N/A + EnableExecutionInfoTracking // EnableSQLAsyncTransaction is the key for enabling async transaction // KeyName: system.enableSQLAsyncTransaction // Value type: Bool @@ -2808,6 +2816,14 @@ const ( // Default value: 5m (5*time.Minute) // Allowed filters: N/A StandbyClusterDelay + // WorkflowTaskCleanupThreshold Is the time, above which, it will attempt to cleanup tasks on workflow deletion + // but below which, it will skip cleanup attempts, based on the assumption that short-lived workflows will be mostly + // creating extremely short-lived timer tasks and there's no real value in explicitly going and deleting them + // KeyName: history.taskCleanupTimeoutThreshold + // Value type: Duration + // Default value: 1d (24 hours) + // Allowed filters: N/A + TaskCleanupTimeoutThreshold // StandbyTaskMissingEventsResendDelay is the amount of time standby cluster's will wait (if events are missing)before calling remote for missing events // KeyName: history.standbyTaskMissingEventsResendDelay // Value type: Duration @@ -4410,6 +4426,11 @@ var BoolKeys = map[BoolKey]DynamicBool{ Description: "EnableGRPCOutbound is the key for enabling outbound GRPC traffic", DefaultValue: true, }, + EnableExecutionInfoTracking: { + KeyName: "system.enableExecutionInfoTracking", + Description: "EnableExecutionInfoTracking is the key for enabling execution info tracking tasks on the mutable state. for the purpose of cleaning them up after workflow closed. This is a feature-flag with the intention of it being removed in the future and defaulted to true", + DefaultValue: false, + }, EnableSQLAsyncTransaction: { KeyName: "system.enableSQLAsyncTransaction", Description: "EnableSQLAsyncTransaction is the key for enabling async transaction", @@ -5387,6 +5408,11 @@ var DurationKeys = map[DurationKey]DynamicDuration{ Description: "StandbyClusterDelay is the artificial delay added to standby cluster's view of active cluster's time", DefaultValue: time.Minute * 5, }, + TaskCleanupTimeoutThreshold: { + KeyName: "history.taskCleanupTimeoutThreshold", + Description: "TaskCleanupTimeoutThreshold is the time, above which, it will attempt to cleanup tasks on workflow deletion but below which, it will skip cleanup attempts, based on the assumption that short-lived workflows will be mostly creating extremely short-lived timer tasks and there's no real value in explicitly going and deleting them", + DefaultValue: time.Hour * 24, + }, StandbyTaskMissingEventsResendDelay: { KeyName: "history.standbyTaskMissingEventsResendDelay", Description: "StandbyTaskMissingEventsResendDelay is the amount of time standby cluster's will wait (if events are missing)before calling remote for missing events", diff --git a/common/log/tag/values.go b/common/log/tag/values.go index b683e45a7a7..119acc5965f 100644 --- a/common/log/tag/values.go +++ b/common/log/tag/values.go @@ -232,6 +232,7 @@ var ( StoreOperationCreateFailoverMarkerTasks = storeOperation("createFailoverMarkerTasks") StoreOperationGetTimerIndexTasks = storeOperation("get-timer-index-tasks") StoreOperationCompleteTimerTask = storeOperation("complete-timer-task") + StoreOperationDeleteTimerTask = storeOperation("delete-timer-task") StoreOperationGetHistoryTasks = storeOperation("get-history-tasks") StoreOperationCompleteHistoryTask = storeOperation("complete-history-task") 
StoreOperationRangeCompleteHistoryTask = storeOperation("range-complete-history-task") diff --git a/common/metrics/defs.go b/common/metrics/defs.go index aebbffa80d5..d742f890d9d 100644 --- a/common/metrics/defs.go +++ b/common/metrics/defs.go @@ -213,6 +213,8 @@ const ( PersistenceCompleteHistoryTaskScope // PersistenceRangeCompleteHistoryTaskScope tracks RangeCompleteHistoryTask calls made by service to persistence layer PersistenceRangeCompleteHistoryTaskScope + // PersistenceDeleteTimerTaskScope tracks DeleteTimerTask calls made by service to persistence layer + PersistenceDeleteTimerTaskScope // PersistenceCreateTasksScope tracks CreateTask calls made by service to persistence layer PersistenceCreateTasksScope // PersistenceGetTasksScope tracks GetTasks calls made by service to persistence layer @@ -1532,6 +1534,7 @@ var ScopeDefs = map[ServiceIdx]map[ScopeIdx]scopeDefinition{ PersistenceGetHistoryTasksScope: {operation: "GetHistoryTasks"}, PersistenceCompleteHistoryTaskScope: {operation: "CompleteHistoryTask"}, PersistenceRangeCompleteHistoryTaskScope: {operation: "RangeCompleteHistoryTask"}, + PersistenceDeleteTimerTaskScope: {operation: "DeleteTimerTask"}, PersistenceCreateTasksScope: {operation: "CreateTask"}, PersistenceGetTasksScope: {operation: "GetTasks"}, PersistenceCompleteTaskScope: {operation: "CompleteTask"}, diff --git a/common/mocks/ExecutionManager.go b/common/mocks/ExecutionManager.go index 04c1754e00f..7fd07a1c5e9 100644 --- a/common/mocks/ExecutionManager.go +++ b/common/mocks/ExecutionManager.go @@ -165,6 +165,20 @@ func (_m *ExecutionManager) DeleteWorkflowExecution(ctx context.Context, request return r0 } +// DeleteTimerTask provides a mock function with given fields: ctx, request +func (_m *ExecutionManager) DeleteTimerTask(ctx context.Context, request *persistence.DeleteTimerTaskRequest) error { + ret := _m.Called(ctx, request) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, *persistence.DeleteTimerTaskRequest) error); ok { + r0 = rf(ctx, request) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // GetCurrentExecution provides a mock function with given fields: ctx, request func (_m *ExecutionManager) GetCurrentExecution(ctx context.Context, request *persistence.GetCurrentExecutionRequest) (*persistence.GetCurrentExecutionResponse, error) { ret := _m.Called(ctx, request) diff --git a/common/persistence/config.go b/common/persistence/config.go index 97466cf488a..13bc33ccd22 100644 --- a/common/persistence/config.go +++ b/common/persistence/config.go @@ -28,6 +28,7 @@ import ( type ( // DynamicConfiguration represents dynamic configuration for persistence layer DynamicConfiguration struct { + EnableExecutionInfoTracking dynamicproperties.BoolPropertyFn EnableSQLAsyncTransaction dynamicproperties.BoolPropertyFn EnableCassandraAllConsistencyLevelDelete dynamicproperties.BoolPropertyFn PersistenceSampleLoggingRate dynamicproperties.IntPropertyFn @@ -44,6 +45,7 @@ type ( // NewDynamicConfiguration returns new config with default values func NewDynamicConfiguration(dc *dynamicconfig.Collection) *DynamicConfiguration { return &DynamicConfiguration{ + EnableExecutionInfoTracking: dc.GetBoolProperty(dynamicproperties.EnableExecutionInfoTracking), EnableSQLAsyncTransaction: dc.GetBoolProperty(dynamicproperties.EnableSQLAsyncTransaction), EnableCassandraAllConsistencyLevelDelete: dc.GetBoolProperty(dynamicproperties.EnableCassandraAllConsistencyLevelDelete), PersistenceSampleLoggingRate: dc.GetIntProperty(dynamicproperties.SampleLoggingRate), diff 
--git a/common/persistence/data_manager_interfaces.go b/common/persistence/data_manager_interfaces.go index 8ef0546af32..061526146e4 100644 --- a/common/persistence/data_manager_interfaces.go +++ b/common/persistence/data_manager_interfaces.go @@ -597,18 +597,19 @@ type ( // WorkflowMutableState indicates workflow related state WorkflowMutableState struct { - ActivityInfos map[int64]*ActivityInfo - TimerInfos map[string]*TimerInfo - ChildExecutionInfos map[int64]*ChildExecutionInfo - RequestCancelInfos map[int64]*RequestCancelInfo - SignalInfos map[int64]*SignalInfo - SignalRequestedIDs map[string]struct{} - ExecutionInfo *WorkflowExecutionInfo - ExecutionStats *ExecutionStats - BufferedEvents []*types.HistoryEvent - VersionHistories *VersionHistories - ReplicationState *ReplicationState // TODO: remove this after all 2DC workflows complete - Checksum checksum.Checksum + ActivityInfos map[int64]*ActivityInfo + TimerInfos map[string]*TimerInfo + ChildExecutionInfos map[int64]*ChildExecutionInfo + RequestCancelInfos map[int64]*RequestCancelInfo + SignalInfos map[int64]*SignalInfo + SignalRequestedIDs map[string]struct{} + WorkflowTimerTaskInfos []*WorkflowTimerTaskInfo + ExecutionInfo *WorkflowExecutionInfo + ExecutionStats *ExecutionStats + BufferedEvents []*types.HistoryEvent + VersionHistories *VersionHistories + ReplicationState *ReplicationState // TODO: remove this after all 2DC workflows complete + Checksum checksum.Checksum } // ActivityInfo details. @@ -661,6 +662,15 @@ type ( TaskStatus int64 } + // WorkflowTimerTaskInfo contains metadata about workflow-level timer tasks. + // These are timer tasks that are associated with the workflow execution itself + // rather than user-created timers or activities (e.g., WorkflowTimeoutTask). + WorkflowTimerTaskInfo struct { + TimeoutType int + TaskID int64 + VisibilityTimestamp time.Time + } + // ChildExecutionInfo has details for pending child executions. 
ChildExecutionInfo struct { Version int64 @@ -876,6 +886,7 @@ type ( DeleteActivityInfos []int64 UpsertTimerInfos []*TimerInfo DeleteTimerInfos []string + WorkflowTimerTasks []*WorkflowTimerTaskInfo UpsertChildExecutionInfos []*ChildExecutionInfo DeleteChildExecutionInfos []int64 UpsertRequestCancelInfos []*RequestCancelInfo @@ -903,6 +914,7 @@ type ( ActivityInfos []*ActivityInfo TimerInfos []*TimerInfo + WorkflowTimerTasks []*WorkflowTimerTaskInfo ChildExecutionInfos []*ChildExecutionInfo RequestCancelInfos []*RequestCancelInfo SignalInfos []*SignalInfo @@ -932,6 +944,12 @@ type ( DomainName string } + // DeleteTimerTaskRequest is used to delete a timer task + DeleteTimerTaskRequest struct { + VisibilityTimestamp time.Time + TaskID int64 + } + // PutReplicationTaskToDLQRequest is used to put a replication task to dlq PutReplicationTaskToDLQRequest struct { SourceClusterName string @@ -1614,6 +1632,7 @@ type ( GetHistoryTasks(ctx context.Context, request *GetHistoryTasksRequest) (*GetHistoryTasksResponse, error) CompleteHistoryTask(ctx context.Context, request *CompleteHistoryTaskRequest) error RangeCompleteHistoryTask(ctx context.Context, request *RangeCompleteHistoryTaskRequest) (*RangeCompleteHistoryTaskResponse, error) + DeleteTimerTask(ctx context.Context, request *DeleteTimerTaskRequest) error // Scan operations diff --git a/common/persistence/data_manager_interfaces_mock.go b/common/persistence/data_manager_interfaces_mock.go index 05e6e252e57..067704ef579 100644 --- a/common/persistence/data_manager_interfaces_mock.go +++ b/common/persistence/data_manager_interfaces_mock.go @@ -493,6 +493,20 @@ func (mr *MockExecutionManagerMockRecorder) DeleteReplicationTaskFromDLQ(ctx, re return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteReplicationTaskFromDLQ", reflect.TypeOf((*MockExecutionManager)(nil).DeleteReplicationTaskFromDLQ), ctx, request) } +// DeleteTimerTask mocks base method. +func (m *MockExecutionManager) DeleteTimerTask(ctx context.Context, request *DeleteTimerTaskRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteTimerTask", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteTimerTask indicates an expected call of DeleteTimerTask. +func (mr *MockExecutionManagerMockRecorder) DeleteTimerTask(ctx, request any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTimerTask", reflect.TypeOf((*MockExecutionManager)(nil).DeleteTimerTask), ctx, request) +} + // DeleteWorkflowExecution mocks base method. 
func (m *MockExecutionManager) DeleteWorkflowExecution(ctx context.Context, request *DeleteWorkflowExecutionRequest) error { m.ctrl.T.Helper() diff --git a/common/persistence/data_store_interfaces.go b/common/persistence/data_store_interfaces.go index 7cbc41ea5b7..550ad7696c2 100644 --- a/common/persistence/data_store_interfaces.go +++ b/common/persistence/data_store_interfaces.go @@ -130,6 +130,7 @@ type ( GetHistoryTasks(ctx context.Context, request *GetHistoryTasksRequest) (*GetHistoryTasksResponse, error) CompleteHistoryTask(ctx context.Context, request *CompleteHistoryTaskRequest) error RangeCompleteHistoryTask(ctx context.Context, request *RangeCompleteHistoryTaskRequest) (*RangeCompleteHistoryTaskResponse, error) + DeleteTimerTask(ctx context.Context, request *DeleteTimerTaskRequest) error // Scan related methods ListConcreteExecutions(ctx context.Context, request *ListConcreteExecutionsRequest) (*InternalListConcreteExecutionsResponse, error) @@ -351,6 +352,7 @@ type ( SignalInfos map[int64]*SignalInfo SignalRequestedIDs map[string]struct{} BufferedEvents []*DataBlob + WorkflowTimerTasks *DataBlob // Checksum field is used by Cassandra storage // ChecksumData is used by All SQL storage @@ -462,6 +464,7 @@ type ( DeleteActivityInfos []int64 UpsertTimerInfos []*TimerInfo DeleteTimerInfos []string + WorkflowTimerTasks *DataBlob UpsertChildExecutionInfos []*InternalChildExecutionInfo DeleteChildExecutionInfos []int64 UpsertRequestCancelInfos []*RequestCancelInfo @@ -492,6 +495,7 @@ type ( ActivityInfos []*InternalActivityInfo TimerInfos []*TimerInfo + WorkflowTimerTasks *DataBlob ChildExecutionInfos []*InternalChildExecutionInfo RequestCancelInfos []*RequestCancelInfo SignalInfos []*SignalInfo diff --git a/common/persistence/data_store_interfaces_mock.go b/common/persistence/data_store_interfaces_mock.go index 944dddc4379..631d9c1ce33 100644 --- a/common/persistence/data_store_interfaces_mock.go +++ b/common/persistence/data_store_interfaces_mock.go @@ -151,6 +151,20 @@ func (mr *MockExecutionStoreMockRecorder) DeleteReplicationTaskFromDLQ(ctx, requ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteReplicationTaskFromDLQ", reflect.TypeOf((*MockExecutionStore)(nil).DeleteReplicationTaskFromDLQ), ctx, request) } +// DeleteTimerTask mocks base method. +func (m *MockExecutionStore) DeleteTimerTask(ctx context.Context, request *DeleteTimerTaskRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteTimerTask", ctx, request) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteTimerTask indicates an expected call of DeleteTimerTask. +func (mr *MockExecutionStoreMockRecorder) DeleteTimerTask(ctx, request any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTimerTask", reflect.TypeOf((*MockExecutionStore)(nil).DeleteTimerTask), ctx, request) +} + // DeleteWorkflowExecution mocks base method. 
func (m *MockExecutionStore) DeleteWorkflowExecution(ctx context.Context, request *DeleteWorkflowExecutionRequest) error { m.ctrl.T.Helper() diff --git a/common/persistence/execution_manager.go b/common/persistence/execution_manager.go index 88099fd8726..42191f1d0eb 100644 --- a/common/persistence/execution_manager.go +++ b/common/persistence/execution_manager.go @@ -98,6 +98,10 @@ func (m *executionManagerImpl) GetWorkflowExecution( }, } + newResponse.State.WorkflowTimerTaskInfos, err = m.DeserializeWorkflowTimerTasks(response.State.WorkflowTimerTasks) + if err != nil { + return nil, err + } newResponse.State.ActivityInfos, err = m.DeserializeActivityInfos(response.State.ActivityInfos) if err != nil { return nil, err @@ -281,6 +285,16 @@ func (m *executionManagerImpl) DeserializeChildExecutionInfos( return newInfos, nil } +func (m *executionManagerImpl) DeserializeWorkflowTimerTasks( + blob *DataBlob, +) ([]*WorkflowTimerTaskInfo, error) { + if blob == nil || len(blob.Data) == 0 { + return nil, nil + } + + return m.serializer.DeserializeWorkflowTimerTasks(blob) +} + func (m *executionManagerImpl) DeserializeActivityInfos( infos map[int64]*InternalActivityInfo, ) (map[int64]*ActivityInfo, error) { @@ -411,6 +425,13 @@ func (m *executionManagerImpl) SerializeUpsertChildExecutionInfos( return newInfos, nil } +func (m *executionManagerImpl) SerializeWorkflowTimerTasks( + tasks []*WorkflowTimerTaskInfo, + encoding constants.EncodingType, +) (*DataBlob, error) { + return m.serializer.SerializeWorkflowTimerTasks(tasks, encoding) +} + func (m *executionManagerImpl) SerializeUpsertActivityInfos( infos []*ActivityInfo, encoding constants.EncodingType, @@ -609,6 +630,9 @@ func (m *executionManagerImpl) CreateWorkflowExecution( ctx context.Context, request *CreateWorkflowExecutionRequest, ) (*CreateWorkflowExecutionResponse, error) { + + m.syncExecutionInfoWithTasks(&request.NewWorkflowSnapshot) + serializedNewWorkflowSnapshot, err := m.SerializeWorkflowSnapshot(&request.NewWorkflowSnapshot, constants.EncodingType(m.dc.SerializationEncoding())) if err != nil { return nil, err @@ -637,6 +661,40 @@ func (m *executionManagerImpl) CreateWorkflowExecution( return &CreateWorkflowExecutionResponse{MutableStateUpdateSessionStats: msuss}, nil } +// syncExecutionInfoWithTasks ensures that the mutable state / execution info is in sync with the tasks +// it tracks, so that those tasks can be located and deleted when the workflow is cleaned up. +// +// The reason this must be done here is that the tasks are created first, and their IDs are assigned +// only after creation. In order to track them via the execution / mutable state record, we +// need to update these references after their creation and taskID assignment. +func (m *executionManagerImpl) syncExecutionInfoWithTasks(workflowSnapshot *WorkflowSnapshot) { + // For now this is only a best-effort mechanism; it may change if these tasks need to be + // tracked more rigorously.
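+ // When the EnableExecutionInfoTracking dynamic config is disabled, the snapshot is left untouched + // and no timer task references are recorded.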
+ if !m.dc.EnableExecutionInfoTracking() { + return + } + + for category, tasks := range workflowSnapshot.TasksByCategory { + for _, task := range tasks { + switch category.categoryID { + case HistoryTaskCategoryIDTimer: + timerTaskInfo, err := task.ToTimerTaskInfo() + if err != nil { + continue + } + workflowSnapshot.WorkflowTimerTasks = append(workflowSnapshot.WorkflowTimerTasks, &WorkflowTimerTaskInfo{ + TaskID: task.GetTaskID(), + VisibilityTimestamp: task.GetVisibilityTimestamp(), + TimeoutType: timerTaskInfo.TimeoutType, + }) + // not tracking any other types of tasks at present, although this may be reasonable in the future + default: + continue + } + } + } +} + func (m *executionManagerImpl) SerializeWorkflowMutation( input *WorkflowMutation, encoding constants.EncodingType, @@ -662,6 +720,10 @@ func (m *executionManagerImpl) SerializeWorkflowMutation( if err != nil { return nil, err } + serializedWorkflowTimerTasks, err := m.SerializeWorkflowTimerTasks(input.WorkflowTimerTasks, encoding) + if err != nil { + return nil, err + } var serializedNewBufferedEvents *DataBlob if input.NewBufferedEvents != nil { serializedNewBufferedEvents, err = m.serializer.SerializeBatchEvents(input.NewBufferedEvents, encoding) @@ -693,6 +755,7 @@ func (m *executionManagerImpl) SerializeWorkflowMutation( DeleteActivityInfos: input.DeleteActivityInfos, UpsertTimerInfos: input.UpsertTimerInfos, DeleteTimerInfos: input.DeleteTimerInfos, + WorkflowTimerTasks: serializedWorkflowTimerTasks, UpsertChildExecutionInfos: serializedUpsertChildExecutionInfos, DeleteChildExecutionInfos: input.DeleteChildExecutionInfos, UpsertRequestCancelInfos: input.UpsertRequestCancelInfos, @@ -739,7 +802,10 @@ func (m *executionManagerImpl) SerializeWorkflowSnapshot( if err != nil { return nil, err } - + serializedWorkflowTimerTasks, err := m.SerializeWorkflowTimerTasks(input.WorkflowTimerTasks, encoding) + if err != nil { + return nil, err + } startVersion, err := getStartVersion(input.VersionHistories) if err != nil { return nil, err @@ -762,6 +828,7 @@ func (m *executionManagerImpl) SerializeWorkflowSnapshot( ActivityInfos: serializedActivityInfos, TimerInfos: input.TimerInfos, + WorkflowTimerTasks: serializedWorkflowTimerTasks, ChildExecutionInfos: serializedChildExecutionInfos, RequestCancelInfos: input.RequestCancelInfos, SignalInfos: input.SignalInfos, @@ -1017,6 +1084,13 @@ func (m *executionManagerImpl) RangeCompleteHistoryTask( return m.persistence.RangeCompleteHistoryTask(ctx, request) } +func (m *executionManagerImpl) DeleteTimerTask( + ctx context.Context, + request *DeleteTimerTaskRequest, +) error { + return m.persistence.DeleteTimerTask(ctx, request) +} + func getStartVersion( versionHistories *VersionHistories, ) (int64, error) { diff --git a/common/persistence/execution_manager_test.go b/common/persistence/execution_manager_test.go index bab8e0253b6..dcffd475329 100644 --- a/common/persistence/execution_manager_test.go +++ b/common/persistence/execution_manager_test.go @@ -309,6 +309,7 @@ func TestExecutionManager_UpdateWorkflowExecution(t *testing.T) { mockedSerializer.EXPECT().SerializeEvent(activityScheduledEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeEvent(childWorkflowScheduledEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeEvent(childWorkflowStartedEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) + 
mockedSerializer.EXPECT().SerializeWorkflowTimerTasks(gomock.Any(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(2) mockedSerializer.EXPECT().SerializeResetPoints(generateResetPoints(), constants.EncodingTypeThriftRW).Return(expectedInfo.ExecutionInfo.AutoResetPoints, nil).Times(2) mockedSerializer.EXPECT().SerializeActiveClusterSelectionPolicy(generateActiveClusterSelectionPolicy(), constants.EncodingTypeThriftRW).Return(sampleActiveClusterSelectionPolicyData(), nil).Times(2) @@ -374,6 +375,7 @@ func TestSerializeWorkflowSnapshot(t *testing.T) { mockedSerializer.EXPECT().SerializeEvent(activityStartedEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeEvent(childWorkflowScheduledEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeEvent(childWorkflowStartedEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) + mockedSerializer.EXPECT().SerializeWorkflowTimerTasks(gomock.Any(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeChecksum(gomock.Any(), gomock.Any()).Return(sampleCheckSumData(), nil).Times(1) mockedSerializer.EXPECT().SerializeActiveClusterSelectionPolicy(generateActiveClusterSelectionPolicy(), constants.EncodingTypeThriftRW).Return(sampleActiveClusterSelectionPolicyData(), nil).Times(1) }, @@ -386,6 +388,7 @@ func TestSerializeWorkflowSnapshot(t *testing.T) { { name: "nil info", prepareMocks: func(mockedSerializer *MockPayloadSerializer) { + mockedSerializer.EXPECT().SerializeWorkflowTimerTasks(gomock.Any(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeChecksum(gomock.Any(), gomock.Any()).Return(sampleTestCheckSumData(), nil).Times(1) }, input: &WorkflowSnapshot{}, @@ -393,6 +396,7 @@ func TestSerializeWorkflowSnapshot(t *testing.T) { assert.NoError(t, err) assert.Equal(t, &InternalWorkflowSnapshot{ ExecutionInfo: &InternalWorkflowExecutionInfo{}, + WorkflowTimerTasks: sampleEventData(), ChecksumData: sampleTestCheckSumData(), StartVersion: constants.EmptyVersion, LastWriteVersion: constants.EmptyVersion, @@ -946,6 +950,7 @@ func TestCreateWorkflowExecution(t *testing.T) { mockedSerializer.EXPECT().SerializeEvent(activityStartedEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeEvent(childWorkflowScheduledEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeEvent(childWorkflowStartedEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) + mockedSerializer.EXPECT().SerializeWorkflowTimerTasks(gomock.Any(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeChecksum(gomock.Any(), gomock.Any()).Return(sampleCheckSumData(), nil).Times(1) mockedSerializer.EXPECT().SerializeActiveClusterSelectionPolicy(generateActiveClusterSelectionPolicy(), constants.EncodingTypeThriftRW).Return(sampleActiveClusterSelectionPolicyData(), nil).Times(1) }, @@ -976,6 +981,7 @@ func TestCreateWorkflowExecution(t *testing.T) { mockedSerializer.EXPECT().SerializeEvent(activityStartedEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeEvent(childWorkflowScheduledEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) 
mockedSerializer.EXPECT().SerializeEvent(childWorkflowStartedEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) + mockedSerializer.EXPECT().SerializeWorkflowTimerTasks(gomock.Any(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeChecksum(gomock.Any(), gomock.Any()).Return(sampleCheckSumData(), nil).Times(1) mockedSerializer.EXPECT().SerializeActiveClusterSelectionPolicy(generateActiveClusterSelectionPolicy(), constants.EncodingTypeThriftRW).Return(sampleActiveClusterSelectionPolicyData(), nil).Times(1) @@ -1013,7 +1019,8 @@ func TestCreateWorkflowExecution(t *testing.T) { } manager := NewExecutionManagerImpl(mockedStore, testlogger.New(t), mockedSerializer, &DynamicConfiguration{ - SerializationEncoding: dynamicproperties.GetStringPropertyFn(string(constants.EncodingTypeThriftRW)), + SerializationEncoding: dynamicproperties.GetStringPropertyFn(string(constants.EncodingTypeThriftRW)), + EnableExecutionInfoTracking: dynamicproperties.GetBoolPropertyFn(false), }) res, err := manager.CreateWorkflowExecution(context.Background(), request) @@ -1062,6 +1069,7 @@ func TestConflictResolveWorkflowExecution(t *testing.T) { mockedSerializer.EXPECT().SerializeEvent(activityStartedEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeEvent(childWorkflowScheduledEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeEvent(childWorkflowStartedEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) + mockedSerializer.EXPECT().SerializeWorkflowTimerTasks(gomock.Any(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeChecksum(gomock.Any(), gomock.Any()).Return(sampleCheckSumData(), nil).Times(1) mockedSerializer.EXPECT().SerializeActiveClusterSelectionPolicy(generateActiveClusterSelectionPolicy(), constants.EncodingTypeThriftRW).Return(sampleActiveClusterSelectionPolicyData(), nil).Times(1) }, @@ -1137,6 +1145,7 @@ func TestConflictResolveWorkflowExecution(t *testing.T) { mockedSerializer.EXPECT().SerializeEvent(activityStartedEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(2) mockedSerializer.EXPECT().SerializeEvent(childWorkflowScheduledEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) mockedSerializer.EXPECT().SerializeEvent(childWorkflowStartedEvent(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(1) + mockedSerializer.EXPECT().SerializeWorkflowTimerTasks(gomock.Any(), constants.EncodingTypeThriftRW).Return(sampleEventData(), nil).Times(2) mockedSerializer.EXPECT().SerializeChecksum(gomock.Any(), gomock.Any()).Return(sampleCheckSumData(), nil).Times(2) mockedSerializer.EXPECT().SerializeActiveClusterSelectionPolicy(generateActiveClusterSelectionPolicy(), constants.EncodingTypeThriftRW).Return(sampleActiveClusterSelectionPolicyData(), nil).Times(2) }, @@ -1433,6 +1442,7 @@ func sampleInternalWorkflowMutation() *InternalWorkflowMutation { TimerID: "test-timer", }, }, + WorkflowTimerTasks: sampleEventData(), UpsertChildExecutionInfos: []*InternalChildExecutionInfo{ { DomainID: testDomainID, @@ -1576,8 +1586,9 @@ func sampleInternalWorkflowSnapshot() *InternalWorkflowSnapshot { WorkflowTypeName: testWorkflowType, }, }, - Checksum: generateChecksum(), - ChecksumData: sampleCheckSumData(), + WorkflowTimerTasks: sampleEventData(), + 
Checksum: generateChecksum(), + ChecksumData: sampleCheckSumData(), } } diff --git a/common/persistence/nosql/nosql_execution_store.go b/common/persistence/nosql/nosql_execution_store.go index 6da9e270e02..d750fac4aa8 100644 --- a/common/persistence/nosql/nosql_execution_store.go +++ b/common/persistence/nosql/nosql_execution_store.go @@ -970,3 +970,15 @@ func (d *nosqlExecutionStore) GetActiveClusterSelectionPolicy( return row.Policy, nil } + +func (d *nosqlExecutionStore) DeleteTimerTask( + ctx context.Context, + request *persistence.DeleteTimerTaskRequest, +) error { + return d.db.DeleteWorkflowTimerTask( + ctx, + d.shardID, + request.VisibilityTimestamp, + request.TaskID, + ) +} diff --git a/common/persistence/nosql/nosql_execution_store_util.go b/common/persistence/nosql/nosql_execution_store_util.go index 647950b9120..046b7481190 100644 --- a/common/persistence/nosql/nosql_execution_store_util.go +++ b/common/persistence/nosql/nosql_execution_store_util.go @@ -57,6 +57,7 @@ func (d *nosqlExecutionStore) prepareCreateWorkflowExecutionRequestWithMaps(newW if err != nil { return nil, err } + executionRequest.WorkflowTimerTasks = newWorkflow.WorkflowTimerTasks executionRequest.ChildWorkflowInfos, err = d.prepareChildWFInfosForWorkflowTxn(newWorkflow.ChildExecutionInfos) if err != nil { return nil, err @@ -122,7 +123,7 @@ func (d *nosqlExecutionStore) prepareResetWorkflowExecutionRequestWithMapsAndEve if err != nil { return nil, err } - // reset 6 maps + // reset maps executionRequest.ActivityInfos, err = d.prepareActivityInfosForWorkflowTxn(resetWorkflow.ActivityInfos) if err != nil { return nil, err @@ -131,6 +132,7 @@ func (d *nosqlExecutionStore) prepareResetWorkflowExecutionRequestWithMapsAndEve if err != nil { return nil, err } + executionRequest.WorkflowTimerTasks = resetWorkflow.WorkflowTimerTasks executionRequest.ChildWorkflowInfos, err = d.prepareChildWFInfosForWorkflowTxn(resetWorkflow.ChildExecutionInfos) if err != nil { return nil, err @@ -167,7 +169,7 @@ func (d *nosqlExecutionStore) prepareUpdateWorkflowExecutionRequestWithMapsAndEv return nil, err } - // merge 6 maps + // merge maps executionRequest.ActivityInfos, err = d.prepareActivityInfosForWorkflowTxn(workflowMutation.UpsertActivityInfos) if err != nil { return nil, err @@ -176,6 +178,7 @@ func (d *nosqlExecutionStore) prepareUpdateWorkflowExecutionRequestWithMapsAndEv if err != nil { return nil, err } + executionRequest.WorkflowTimerTasks = workflowMutation.WorkflowTimerTasks executionRequest.ChildWorkflowInfos, err = d.prepareChildWFInfosForWorkflowTxn(workflowMutation.UpsertChildExecutionInfos) if err != nil { return nil, err @@ -190,7 +193,7 @@ func (d *nosqlExecutionStore) prepareUpdateWorkflowExecutionRequestWithMapsAndEv } executionRequest.SignalRequestedIDs = workflowMutation.UpsertSignalRequestedIDs - // delete from 6 maps + // delete from maps executionRequest.ActivityInfoKeysToDelete = workflowMutation.DeleteActivityInfos executionRequest.TimerInfoKeysToDelete = workflowMutation.DeleteTimerInfos executionRequest.ChildWorkflowInfoKeysToDelete = workflowMutation.DeleteChildExecutionInfos @@ -506,6 +509,14 @@ func (d *nosqlExecutionStore) prepareTimerInfosForWorkflowTxn(timerInfo []*persi return m, nil } +func (d *nosqlExecutionStore) prepareWorkflowTimerTaskInfosForWorkflowTxn(workflowTimerTaskInfos []*persistence.WorkflowTimerTaskInfo) ([]*persistence.WorkflowTimerTaskInfo, error) { + m := []*persistence.WorkflowTimerTaskInfo{} + for _, info := range workflowTimerTaskInfos { + m = append(m, info) + } + 
return m, nil +} + func (d *nosqlExecutionStore) prepareChildWFInfosForWorkflowTxn(childWFInfos []*persistence.InternalChildExecutionInfo) (map[int64]*persistence.InternalChildExecutionInfo, error) { m := map[int64]*persistence.InternalChildExecutionInfo{} for _, c := range childWFInfos { diff --git a/common/persistence/nosql/nosqlplugin/cassandra/workflow.go b/common/persistence/nosql/nosqlplugin/cassandra/workflow.go index 1cd69b16a4e..002b839cf61 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/workflow.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/workflow.go @@ -228,6 +228,12 @@ func (db *CDB) SelectWorkflowExecution(ctx context.Context, shardID int, domainI } state.TimerInfos = timerInfos + if workflowTimerTasksData, ok := result["workflow_timer_tasks"].([]byte); ok && len(workflowTimerTasksData) > 0 { + if encoding, ok := result["workflow_timer_tasks_encoding"].(string); ok { + state.WorkflowTimerTasks = persistence.NewDataBlob(workflowTimerTasksData, constants.EncodingType(encoding)) + } + } + childExecutionInfos := make(map[int64]*persistence.InternalChildExecutionInfo) cMap := result["child_executions_map"].(map[int64]map[string]interface{}) for key, value := range cMap { @@ -525,6 +531,26 @@ func (db *CDB) DeleteTimerTask(ctx context.Context, shardID int, taskID int64, v return db.executeWithConsistencyAll(query) } +func (db *CDB) DeleteWorkflowTimerTask( + ctx context.Context, + shardID int, + visibilityTimestamp time.Time, + taskID int64, +) error { + ts := persistence.UnixNanoToDBTimestamp(visibilityTimestamp.UnixNano()) + query := db.session.Query(templateCompleteTimerTaskQuery, + shardID, + rowTypeTimerTask, + rowTypeTimerDomainID, + rowTypeTimerWorkflowID, + rowTypeTimerRunID, + ts, + taskID, + ).WithContext(ctx) + + return db.executeWithConsistencyAll(query) +} + func (db *CDB) RangeDeleteTimerTasks(ctx context.Context, shardID int, inclusiveMinTime, exclusiveMaxTime time.Time) error { start := persistence.UnixNanoToDBTimestamp(inclusiveMinTime.UnixNano()) end := persistence.UnixNanoToDBTimestamp(exclusiveMaxTime.UnixNano()) diff --git a/common/persistence/nosql/nosqlplugin/cassandra/workflow_cql.go b/common/persistence/nosql/nosqlplugin/cassandra/workflow_cql.go index 64a07ca3f3d..35052b011b3 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/workflow_cql.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/workflow_cql.go @@ -280,8 +280,8 @@ const ( `VALUES(?, ?, ?, ?, ?, ?, ?, ?, {run_id: ?, create_request_id: ?, state: ?, close_status: ?}, ?, ?, ?) IF NOT EXISTS USING TTL 0 ` templateCreateWorkflowExecutionWithVersionHistoriesQuery = `INSERT INTO executions (` + - `shard_id, domain_id, workflow_id, run_id, type, execution, next_event_id, visibility_ts, task_id, version_histories, version_histories_encoding, checksum, workflow_last_write_version, workflow_state, created_time) ` + - `VALUES(?, ?, ?, ?, ?, ` + templateWorkflowExecutionType + `, ?, ?, ?, ?, ?, ` + templateChecksumType + `, ?, ?, ?) IF NOT EXISTS ` + `shard_id, domain_id, workflow_id, run_id, type, execution, next_event_id, visibility_ts, task_id, workflow_timer_tasks, workflow_timer_tasks_encoding, version_histories, version_histories_encoding, checksum, workflow_last_write_version, workflow_state, created_time) ` + + `VALUES(?, ?, ?, ?, ?, ` + templateWorkflowExecutionType + `, ?, ?, ?, ?, ?, ?, ?, ` + templateChecksumType + `, ?, ?, ?) 
IF NOT EXISTS ` templateCreateTransferTaskQuery = `INSERT INTO executions (` + `shard_id, type, domain_id, workflow_id, run_id, transfer, data, data_encoding, visibility_ts, task_id, created_time) ` + @@ -309,7 +309,7 @@ const ( // TODO: remove replication_state after all 2DC workflows complete templateGetWorkflowExecutionQuery = `SELECT execution, replication_state, activity_map, timer_map, ` + - `child_executions_map, request_cancel_map, signal_map, signal_requested, buffered_events_list, ` + + `workflow_timer_tasks, workflow_timer_tasks_encoding, child_executions_map, request_cancel_map, signal_map, signal_requested, buffered_events_list, ` + `buffered_replication_tasks_map, version_histories, version_histories_encoding, checksum ` + `FROM executions ` + `WHERE shard_id = ? ` + @@ -412,6 +412,18 @@ const ( `and visibility_ts = ? ` + `and task_id = ? ` + templateUpdateWorkflowTimerTasksQuery = `UPDATE executions ` + + `SET workflow_timer_tasks = ? ` + + `, workflow_timer_tasks_encoding = ? ` + + `, last_updated_time = ? ` + + `WHERE shard_id = ? ` + + `and type = ? ` + + `and domain_id = ? ` + + `and workflow_id = ? ` + + `and run_id = ? ` + + `and visibility_ts = ? ` + + `and task_id = ? ` + templateUpdateChildExecutionInfoQuery = `UPDATE executions ` + `SET child_executions_map[ ? ] = ` + templateChildExecutionInfoType + ` ` + `, last_updated_time = ? ` + diff --git a/common/persistence/nosql/nosqlplugin/cassandra/workflow_test.go b/common/persistence/nosql/nosqlplugin/cassandra/workflow_test.go index 66e2f76afcb..95556851c5d 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/workflow_test.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/workflow_test.go @@ -595,6 +595,7 @@ func TestSelectWorkflowExecution(t *testing.T) { BufferedEvents: []*persistence.DataBlob{ {Encoding: constants.EncodingTypeThriftRW, Data: []byte("test-buffered-events-1")}, }, + WorkflowTimerTasks: nil, }, }, } diff --git a/common/persistence/nosql/nosqlplugin/cassandra/workflow_utils.go b/common/persistence/nosql/nosqlplugin/cassandra/workflow_utils.go index 5691d97f9a9..1ac5a64d93f 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/workflow_utils.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/workflow_utils.go @@ -978,6 +978,34 @@ func updateTimerInfos( return nil } +func updateWorkflowTimerTasks( + batch gocql.Batch, + shardID int, + domainID string, + workflowID string, + runID string, + workflowTimerTasks *persistence.DataBlob, + timeStamp time.Time, +) error { + if workflowTimerTasks == nil { + return nil + } + + batch.Query(templateUpdateWorkflowTimerTasksQuery, + workflowTimerTasks.Data, + workflowTimerTasks.GetEncodingString(), + timeStamp, + shardID, + rowTypeExecution, + domainID, + workflowID, + runID, + defaultVisibilityTimestamp, + rowTypeExecutionTaskID) + + return nil +} + func resetActivityInfos( batch gocql.Batch, shardID int, @@ -1163,6 +1191,10 @@ func createWorkflowExecutionWithMergeMaps( if err != nil { return err } + err = updateWorkflowTimerTasks(batch, shardID, domainID, workflowID, execution.RunID, execution.WorkflowTimerTasks, timeStamp) + if err != nil { + return err + } err = updateChildExecutionInfos(batch, shardID, domainID, workflowID, execution.RunID, execution.ChildWorkflowInfos, nil, timeStamp) if err != nil { return err @@ -1314,6 +1346,10 @@ func updateWorkflowExecutionAndEventBufferWithMergeAndDeleteMaps( if err != nil { return err } + err = updateWorkflowTimerTasks(batch, shardID, domainID, workflowID, execution.RunID, 
execution.WorkflowTimerTasks, timeStamp) + if err != nil { + return err + } err = updateChildExecutionInfos(batch, shardID, domainID, workflowID, execution.RunID, execution.ChildWorkflowInfos, execution.ChildWorkflowInfoKeysToDelete, timeStamp) if err != nil { return err @@ -1512,6 +1548,8 @@ func createWorkflowExecution( execution.NextEventID, defaultVisibilityTimestamp, rowTypeExecutionTaskID, + execution.WorkflowTimerTasks.GetData(), + execution.WorkflowTimerTasks.GetEncodingString(), execution.VersionHistories.Data, execution.VersionHistories.GetEncodingString(), execution.Checksums.Version, diff --git a/common/persistence/nosql/nosqlplugin/cassandra/workflow_utils_test.go b/common/persistence/nosql/nosqlplugin/cassandra/workflow_utils_test.go index 45a5986af12..104a48e3af4 100644 --- a/common/persistence/nosql/nosqlplugin/cassandra/workflow_utils_test.go +++ b/common/persistence/nosql/nosqlplugin/cassandra/workflow_utils_test.go @@ -2587,6 +2587,7 @@ func TestUpdateWorkflowExecution(t *testing.T) { CronOverlapPolicy: 0, }, PreviousNextEventIDCondition: common.Int64Ptr(10), + WorkflowTimerTasks: &persistence.DataBlob{}, VersionHistories: &persistence.DataBlob{}, Checksums: &checksum.Checksum{}, }, @@ -2673,11 +2674,12 @@ func TestCreateWorkflowExecution(t *testing.T) { CronOverlapPolicy: types.CronOverlapPolicyBufferOne, }, PreviousNextEventIDCondition: common.Int64Ptr(10), + WorkflowTimerTasks: &persistence.DataBlob{}, VersionHistories: &persistence.DataBlob{}, Checksums: &checksum.Checksum{}, }, wantQueries: []string{ - `INSERT INTO executions (shard_id, domain_id, workflow_id, run_id, type, execution, next_event_id, visibility_ts, task_id, version_histories, version_histories_encoding, checksum, workflow_last_write_version, workflow_state, created_time) ` + + `INSERT INTO executions (shard_id, domain_id, workflow_id, run_id, type, execution, next_event_id, visibility_ts, task_id, workflow_timer_tasks, workflow_timer_tasks_encoding, version_histories, version_histories_encoding, checksum, workflow_last_write_version, workflow_state, created_time) ` + `VALUES(1000, domain1, workflow1, runid1, 1, ` + `{domain_id: domain1, workflow_id: workflow1, run_id: runid1, first_run_id: , parent_domain_id: , parent_workflow_id: , ` + `parent_run_id: parentRunID1, initiated_id: 0, completion_event_batch_id: 0, completion_event: [], completion_event_data_encoding: , ` + @@ -2691,7 +2693,7 @@ func TestCreateWorkflowExecution(t *testing.T) { `backoff_coefficient: 0, max_interval: 0, expiration_time: 0001-01-01T00:00:00Z, max_attempts: 0, non_retriable_errors: [], ` + `event_store_version: 2, branch_token: [], cron_schedule: , cron_overlap_policy: 1, expiration_seconds: 0, search_attributes: map[], memo: map[], partition_config: map[], ` + `active_cluster_selection_policy: [116 104 114 105 102 116 45 101 110 99 111 100 101 100 45 97 99 116 105 118 101 45 99 108 117 115 116 101 114 45 115 101 108 101 99 116 105 111 110 45 112 111 108 105 99 121 45 100 97 116 97], active_cluster_selection_policy_encoding: thriftrw` + - `}, 0, 946684800000, -10, [], , {version: 0, flavor: 0, value: [] }, 0, 0, 2025-01-06T15:00:00Z) IF NOT EXISTS `, + `}, 0, 946684800000, -10, [], , [], , {version: 0, flavor: 0, value: [] }, 0, 0, 2025-01-06T15:00:00Z) IF NOT EXISTS `, }, }, } diff --git a/common/persistence/nosql/nosqlplugin/dynamodb/workflow.go b/common/persistence/nosql/nosqlplugin/dynamodb/workflow.go index 7b458576041..9659cff515d 100644 --- a/common/persistence/nosql/nosqlplugin/dynamodb/workflow.go +++ 
b/common/persistence/nosql/nosqlplugin/dynamodb/workflow.go @@ -156,3 +156,7 @@ func (db *ddb) SelectActiveClusterSelectionPolicy(ctx context.Context, shardID i func (db *ddb) DeleteActiveClusterSelectionPolicy(ctx context.Context, shardID int, domainID, wfID, rID string) error { panic("TODO") } + +func (db *ddb) DeleteWorkflowTimerTask(ctx context.Context, shardID int, visibilityTimestamp time.Time, taskID int64) error { + panic("TODO") +} diff --git a/common/persistence/nosql/nosqlplugin/interfaces.go b/common/persistence/nosql/nosqlplugin/interfaces.go index 1633e65172c..665f2cba124 100644 --- a/common/persistence/nosql/nosqlplugin/interfaces.go +++ b/common/persistence/nosql/nosqlplugin/interfaces.go @@ -478,6 +478,8 @@ type ( SelectTimerTasksOrderByVisibilityTime(ctx context.Context, shardID, pageSize int, pageToken []byte, inclusiveMinTime, exclusiveMaxTime time.Time) ([]*HistoryMigrationTask, []byte, error) // delete a single timer task DeleteTimerTask(ctx context.Context, shardID int, taskID int64, visibilityTimestamp time.Time) error + // delete a single workflow timer task with specific workflow identifiers + DeleteWorkflowTimerTask(ctx context.Context, shardID int, visibilityTimestamp time.Time, taskID int64) error // delete a range of timer tasks RangeDeleteTimerTasks(ctx context.Context, shardID int, inclusiveMinTime, exclusiveMaxTime time.Time) error diff --git a/common/persistence/nosql/nosqlplugin/interfaces_mock.go b/common/persistence/nosql/nosqlplugin/interfaces_mock.go index 19a058db572..2e6880284b0 100644 --- a/common/persistence/nosql/nosqlplugin/interfaces_mock.go +++ b/common/persistence/nosql/nosqlplugin/interfaces_mock.go @@ -373,6 +373,20 @@ func (mr *MockDBMockRecorder) DeleteWorkflowExecution(ctx, shardID, domainID, wo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowExecution", reflect.TypeOf((*MockDB)(nil).DeleteWorkflowExecution), ctx, shardID, domainID, workflowID, runID) } +// DeleteWorkflowTimerTask mocks base method. +func (m *MockDB) DeleteWorkflowTimerTask(ctx context.Context, shardID int, visibilityTimestamp time.Time, taskID int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteWorkflowTimerTask", ctx, shardID, visibilityTimestamp, taskID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteWorkflowTimerTask indicates an expected call of DeleteWorkflowTimerTask. +func (mr *MockDBMockRecorder) DeleteWorkflowTimerTask(ctx, shardID, visibilityTimestamp, taskID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowTimerTask", reflect.TypeOf((*MockDB)(nil).DeleteWorkflowTimerTask), ctx, shardID, visibilityTimestamp, taskID) +} + // GetQueueSize mocks base method. func (m *MockDB) GetQueueSize(ctx context.Context, queueType persistence.QueueType) (int64, error) { m.ctrl.T.Helper() @@ -1518,6 +1532,20 @@ func (mr *MocktableCRUDMockRecorder) DeleteWorkflowExecution(ctx, shardID, domai return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowExecution", reflect.TypeOf((*MocktableCRUD)(nil).DeleteWorkflowExecution), ctx, shardID, domainID, workflowID, runID) } +// DeleteWorkflowTimerTask mocks base method. 
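// Illustrative sketch only: how a store-level test might stub the new plugin
// method. NewMockDB is the generated gomock constructor that accompanies MockDB;
// ctrl, testShardID, testVisibilityTs and testTaskID are assumed test fixtures,
// not names introduced by this change.
db := NewMockDB(ctrl)
db.EXPECT().
	DeleteWorkflowTimerTask(gomock.Any(), testShardID, testVisibilityTs, testTaskID).
	Return(nil).
	Times(1)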
+func (m *MocktableCRUD) DeleteWorkflowTimerTask(ctx context.Context, shardID int, visibilityTimestamp time.Time, taskID int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteWorkflowTimerTask", ctx, shardID, visibilityTimestamp, taskID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteWorkflowTimerTask indicates an expected call of DeleteWorkflowTimerTask. +func (mr *MocktableCRUDMockRecorder) DeleteWorkflowTimerTask(ctx, shardID, visibilityTimestamp, taskID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowTimerTask", reflect.TypeOf((*MocktableCRUD)(nil).DeleteWorkflowTimerTask), ctx, shardID, visibilityTimestamp, taskID) +} + // GetQueueSize mocks base method. func (m *MocktableCRUD) GetQueueSize(ctx context.Context, queueType persistence.QueueType) (int64, error) { m.ctrl.T.Helper() @@ -3316,6 +3344,20 @@ func (mr *MockWorkflowCRUDMockRecorder) DeleteWorkflowExecution(ctx, shardID, do return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowExecution", reflect.TypeOf((*MockWorkflowCRUD)(nil).DeleteWorkflowExecution), ctx, shardID, domainID, workflowID, runID) } +// DeleteWorkflowTimerTask mocks base method. +func (m *MockWorkflowCRUD) DeleteWorkflowTimerTask(ctx context.Context, shardID int, visibilityTimestamp time.Time, taskID int64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteWorkflowTimerTask", ctx, shardID, visibilityTimestamp, taskID) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteWorkflowTimerTask indicates an expected call of DeleteWorkflowTimerTask. +func (mr *MockWorkflowCRUDMockRecorder) DeleteWorkflowTimerTask(ctx, shardID, visibilityTimestamp, taskID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowTimerTask", reflect.TypeOf((*MockWorkflowCRUD)(nil).DeleteWorkflowTimerTask), ctx, shardID, visibilityTimestamp, taskID) +} + // InsertReplicationDLQTask mocks base method. 
func (m *MockWorkflowCRUD) InsertReplicationDLQTask(ctx context.Context, shardID int, sourceCluster string, task *HistoryMigrationTask) error { m.ctrl.T.Helper() diff --git a/common/persistence/nosql/nosqlplugin/mongodb/workflow.go b/common/persistence/nosql/nosqlplugin/mongodb/workflow.go index 46b6975ff69..05bf12f1cd0 100644 --- a/common/persistence/nosql/nosqlplugin/mongodb/workflow.go +++ b/common/persistence/nosql/nosqlplugin/mongodb/workflow.go @@ -156,3 +156,7 @@ func (db *mdb) SelectActiveClusterSelectionPolicy(ctx context.Context, shardID i func (db *mdb) DeleteActiveClusterSelectionPolicy(ctx context.Context, shardID int, domainID, wfID, rID string) error { panic("TODO") } + +func (db *mdb) DeleteWorkflowTimerTask(ctx context.Context, shardID int, visibilityTimestamp time.Time, taskID int64) error { + panic("TODO") +} diff --git a/common/persistence/nosql/nosqlplugin/types.go b/common/persistence/nosql/nosqlplugin/types.go index 6d946bbf0f4..28730eeb6b1 100644 --- a/common/persistence/nosql/nosqlplugin/types.go +++ b/common/persistence/nosql/nosqlplugin/types.go @@ -49,6 +49,7 @@ type ( // For WorkflowExecutionMapsWriteMode of create, update and reset ActivityInfos map[int64]*persistence.InternalActivityInfo TimerInfos map[string]*persistence.TimerInfo + WorkflowTimerTasks *persistence.DataBlob ChildWorkflowInfos map[int64]*persistence.InternalChildExecutionInfo RequestCancelInfos map[int64]*persistence.RequestCancelInfo SignalInfos map[int64]*persistence.SignalInfo diff --git a/common/persistence/persistence-tests/persistenceTestBase.go b/common/persistence/persistence-tests/persistenceTestBase.go index 2c0a09670d0..1dd0b7f89cb 100644 --- a/common/persistence/persistence-tests/persistenceTestBase.go +++ b/common/persistence/persistence-tests/persistenceTestBase.go @@ -162,6 +162,7 @@ func NewTestBaseWithNoSQL(t *testing.T, options *TestBaseOptions) *TestBase { ReadNoSQLShardFromDataBlob: dynamicproperties.GetBoolPropertyFn(true), DomainAuditLogTTL: func(domainID string) time.Duration { return time.Hour * 24 * 365 }, // 1 year default HistoryNodeDeleteBatchSize: dynamicproperties.GetIntPropertyFn(1000), + EnableExecutionInfoTracking: dynamicproperties.GetBoolPropertyFn(false), } params := TestBaseParams{ DefaultTestCluster: testCluster, @@ -196,6 +197,7 @@ func NewTestBaseWithSQL(t *testing.T, options *TestBaseOptions) *TestBase { ReadNoSQLShardFromDataBlob: dynamicproperties.GetBoolPropertyFn(true), DomainAuditLogTTL: func(domainID string) time.Duration { return time.Hour * 24 * 365 }, // 1 year default HistoryNodeDeleteBatchSize: dynamicproperties.GetIntPropertyFn(1000), + EnableExecutionInfoTracking: dynamicproperties.GetBoolPropertyFn(false), } params := TestBaseParams{ DefaultTestCluster: testCluster, diff --git a/common/persistence/serializer.go b/common/persistence/serializer.go index 7a339b6ab51..1d99eedf4ae 100644 --- a/common/persistence/serializer.go +++ b/common/persistence/serializer.go @@ -23,6 +23,7 @@ package persistence import ( "encoding/json" "fmt" + "time" "github.com/golang/snappy" @@ -30,6 +31,8 @@ import ( "github.com/uber/cadence/.gen/go/history" "github.com/uber/cadence/.gen/go/replicator" workflow "github.com/uber/cadence/.gen/go/shared" + "github.com/uber/cadence/.gen/go/sqlblobs" + "github.com/uber/cadence/common" "github.com/uber/cadence/common/checksum" "github.com/uber/cadence/common/codec" "github.com/uber/cadence/common/constants" @@ -98,6 +101,10 @@ type ( // serialize/deserialize active cluster selection policy 
SerializeActiveClusterSelectionPolicy(policy *types.ActiveClusterSelectionPolicy, encodingType constants.EncodingType) (*DataBlob, error) DeserializeActiveClusterSelectionPolicy(data *DataBlob) (*types.ActiveClusterSelectionPolicy, error) + + // serialize/deserialize workflow timer tasks + SerializeWorkflowTimerTasks(tasks []*WorkflowTimerTaskInfo, encodingType constants.EncodingType) (*DataBlob, error) + DeserializeWorkflowTimerTasks(data *DataBlob) ([]*WorkflowTimerTaskInfo, error) } // CadenceSerializationError is an error type for cadence serialization @@ -376,6 +383,25 @@ func (t *serializerImpl) DeserializeActiveClusterSelectionPolicy(data *DataBlob) return &policy, err } +func (t *serializerImpl) SerializeWorkflowTimerTasks(tasks []*WorkflowTimerTaskInfo, encodingType constants.EncodingType) (*DataBlob, error) { + if len(tasks) == 0 { + return nil, nil + } + return t.serialize(tasks, encodingType) +} + +func (t *serializerImpl) DeserializeWorkflowTimerTasks(data *DataBlob) ([]*WorkflowTimerTaskInfo, error) { + if data == nil { + return nil, nil + } + var tasks []*WorkflowTimerTaskInfo + if len(data.Data) == 0 { + return tasks, nil + } + err := t.deserialize(data, &tasks) + return tasks, err +} + func (t *serializerImpl) serialize(input interface{}, encodingType constants.EncodingType) (*DataBlob, error) { if input == nil { return nil, nil @@ -431,6 +457,8 @@ func (t *serializerImpl) thriftrwEncode(input interface{}) ([]byte, error) { return t.thriftrwEncoder.Encode(thrift.FromActiveClusters(input)) case *types.ActiveClusterSelectionPolicy: return t.thriftrwEncoder.Encode(thrift.FromActiveClusterSelectionPolicy(input)) + case []*WorkflowTimerTaskInfo: + return t.thriftrwEncoder.Encode(workflowTimerTaskInfoToThrift(input)) default: return nil, nil } @@ -555,6 +583,13 @@ func (t *serializerImpl) thriftrwDecode(data []byte, target interface{}) error { } *target = *thrift.ToActiveClusterSelectionPolicy(&thriftTarget) return nil + case *[]*WorkflowTimerTaskInfo: + thriftTarget := sqlblobs.WorkflowTimerTaskInfo{} + if err := t.thriftrwEncoder.Decode(data, &thriftTarget); err != nil { + return err + } + *target = workflowTimerTaskInfoFromThrift(&thriftTarget) + return nil default: return nil } @@ -607,3 +642,40 @@ func NewCadenceDeserializationError(msg string) *CadenceDeserializationError { func (e *CadenceDeserializationError) Error() string { return fmt.Sprintf("cadence deserialization error: %v", e.msg) } + +// workflowTimerTaskInfoToThrift converts persistence WorkflowTimerTaskInfo slice to thrift wrapper type +func workflowTimerTaskInfoToThrift(tasks []*WorkflowTimerTaskInfo) *sqlblobs.WorkflowTimerTaskInfo { + refs := make([]*sqlblobs.TimerReference, 0, len(tasks)) + for _, task := range tasks { + if task == nil { + continue + } + refs = append(refs, &sqlblobs.TimerReference{ + TaskID: common.Int64Ptr(task.TaskID), + VisibilityTimestamp: common.Int64Ptr(task.VisibilityTimestamp.UnixNano()), + TimeoutType: common.Int16Ptr(int16(task.TimeoutType)), + }) + } + return &sqlblobs.WorkflowTimerTaskInfo{ + References: refs, + } +} + +// workflowTimerTaskInfoFromThrift converts thrift wrapper type to persistence WorkflowTimerTaskInfo slice +func workflowTimerTaskInfoFromThrift(info *sqlblobs.WorkflowTimerTaskInfo) []*WorkflowTimerTaskInfo { + if info == nil || info.References == nil { + return nil + } + tasks := make([]*WorkflowTimerTaskInfo, 0, len(info.References)) + for _, ref := range info.References { + if ref == nil { + continue + } + tasks = append(tasks, &WorkflowTimerTaskInfo{ + 
TaskID: ref.GetTaskID(), + VisibilityTimestamp: time.Unix(0, ref.GetVisibilityTimestamp()), + TimeoutType: int(ref.GetTimeoutType()), + }) + } + return tasks +} diff --git a/common/persistence/serializer_mock.go b/common/persistence/serializer_mock.go index 024a9f339b4..2f530073a80 100644 --- a/common/persistence/serializer_mock.go +++ b/common/persistence/serializer_mock.go @@ -253,6 +253,21 @@ func (mr *MockPayloadSerializerMockRecorder) DeserializeVisibilityMemo(data any) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeserializeVisibilityMemo", reflect.TypeOf((*MockPayloadSerializer)(nil).DeserializeVisibilityMemo), data) } +// DeserializeWorkflowTimerTasks mocks base method. +func (m *MockPayloadSerializer) DeserializeWorkflowTimerTasks(data *DataBlob) ([]*WorkflowTimerTaskInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeserializeWorkflowTimerTasks", data) + ret0, _ := ret[0].([]*WorkflowTimerTaskInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeserializeWorkflowTimerTasks indicates an expected call of DeserializeWorkflowTimerTasks. +func (mr *MockPayloadSerializerMockRecorder) DeserializeWorkflowTimerTasks(data any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeserializeWorkflowTimerTasks", reflect.TypeOf((*MockPayloadSerializer)(nil).DeserializeWorkflowTimerTasks), data) +} + // SerializeActiveClusterSelectionPolicy mocks base method. func (m *MockPayloadSerializer) SerializeActiveClusterSelectionPolicy(policy *types.ActiveClusterSelectionPolicy, encodingType constants.EncodingType) (*DataBlob, error) { m.ctrl.T.Helper() @@ -462,3 +477,18 @@ func (mr *MockPayloadSerializerMockRecorder) SerializeVisibilityMemo(memo, encod mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SerializeVisibilityMemo", reflect.TypeOf((*MockPayloadSerializer)(nil).SerializeVisibilityMemo), memo, encodingType) } + +// SerializeWorkflowTimerTasks mocks base method. +func (m *MockPayloadSerializer) SerializeWorkflowTimerTasks(tasks []*WorkflowTimerTaskInfo, encodingType constants.EncodingType) (*DataBlob, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SerializeWorkflowTimerTasks", tasks, encodingType) + ret0, _ := ret[0].(*DataBlob) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SerializeWorkflowTimerTasks indicates an expected call of SerializeWorkflowTimerTasks. +func (mr *MockPayloadSerializerMockRecorder) SerializeWorkflowTimerTasks(tasks, encodingType any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SerializeWorkflowTimerTasks", reflect.TypeOf((*MockPayloadSerializer)(nil).SerializeWorkflowTimerTasks), tasks, encodingType) +} diff --git a/common/persistence/sql/sql_execution_store.go b/common/persistence/sql/sql_execution_store.go index eadbc22a9cd..ca015d1c9c7 100644 --- a/common/persistence/sql/sql_execution_store.go +++ b/common/persistence/sql/sql_execution_store.go @@ -1434,3 +1434,13 @@ func (m *sqlExecutionStore) DeleteActiveClusterSelectionPolicy( // It requires creating a new table in the database to store the active cluster selection policy return nil } + +func (m *sqlExecutionStore) DeleteTimerTask( + ctx context.Context, + request *p.DeleteTimerTaskRequest, +) error { + // TODO: Implement timer task deletion for SQL stores. See issue #7567 + // This should delete timer task records (both workflow timers and user timers) from the timer_tasks table. 
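// Illustrative sketch only: the round trip for the new blob, using just the
// PayloadSerializer methods and WorkflowTimerTaskInfo fields introduced above.
// The field values and the helper function itself are made up for the example;
// it assumes the persistence package's existing "time" and constants imports.
func exampleWorkflowTimerTaskRoundTrip(serializer PayloadSerializer) ([]*WorkflowTimerTaskInfo, error) {
	tasks := []*WorkflowTimerTaskInfo{{
		TaskID:              42,
		VisibilityTimestamp: time.Unix(0, 1700000000000000000),
		TimeoutType:         0,
	}}
	blob, err := serializer.SerializeWorkflowTimerTasks(tasks, constants.EncodingTypeThriftRW)
	if err != nil {
		return nil, err
	}
	// blob.Data and blob.GetEncodingString() are what the Cassandra plugin writes
	// into the new workflow_timer_tasks / workflow_timer_tasks_encoding columns.
	return serializer.DeserializeWorkflowTimerTasks(blob)
}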
+ // Reference implementation: common/persistence/nosql/nosqlplugin/cassandra/workflow.go:523-536 + return nil +} diff --git a/common/persistence/wrappers/errorinjectors/execution_generated.go b/common/persistence/wrappers/errorinjectors/execution_generated.go index 86f2ab84fd2..674e953e2c3 100644 --- a/common/persistence/wrappers/errorinjectors/execution_generated.go +++ b/common/persistence/wrappers/errorinjectors/execution_generated.go @@ -146,6 +146,21 @@ func (c *injectorExecutionManager) DeleteReplicationTaskFromDLQ(ctx context.Cont return } +func (c *injectorExecutionManager) DeleteTimerTask(ctx context.Context, request *persistence.DeleteTimerTaskRequest) (err error) { + fakeErr := generateFakeError(c.errorRate, c.starttime) + var forwardCall bool + if forwardCall = shouldForwardCallToPersistence(fakeErr); forwardCall { + err = c.wrapped.DeleteTimerTask(ctx, request) + } + + if fakeErr != nil { + logErr(c.logger, "ExecutionManager.DeleteTimerTask", fakeErr, forwardCall, err) + err = fakeErr + return + } + return +} + func (c *injectorExecutionManager) DeleteWorkflowExecution(ctx context.Context, request *persistence.DeleteWorkflowExecutionRequest) (err error) { fakeErr := generateFakeError(c.errorRate, c.starttime) var forwardCall bool diff --git a/common/persistence/wrappers/errorinjectors/injectors_test.go b/common/persistence/wrappers/errorinjectors/injectors_test.go index 0a9d0a0cf4f..265f9abd267 100644 --- a/common/persistence/wrappers/errorinjectors/injectors_test.go +++ b/common/persistence/wrappers/errorinjectors/injectors_test.go @@ -299,6 +299,7 @@ func builderForPassThrough(t *testing.T, injector any, errorRate float64, logger mocked.EXPECT().RangeDeleteReplicationTaskFromDLQ(gomock.Any(), gomock.Any()).Return(&persistence.RangeDeleteReplicationTaskFromDLQResponse{}, expectedErr) mocked.EXPECT().GetActiveClusterSelectionPolicy(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.ActiveClusterSelectionPolicy{}, expectedErr) mocked.EXPECT().DeleteActiveClusterSelectionPolicy(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedErr) + mocked.EXPECT().DeleteTimerTask(gomock.Any(), gomock.Any()).Return(expectedErr) } default: t.Errorf("unsupported type %v", reflect.TypeOf(injector)) diff --git a/common/persistence/wrappers/errorinjectors/utils.go b/common/persistence/wrappers/errorinjectors/utils.go index 2d6a5384a64..4d5b09442da 100644 --- a/common/persistence/wrappers/errorinjectors/utils.go +++ b/common/persistence/wrappers/errorinjectors/utils.go @@ -256,6 +256,8 @@ func executionManagerTags(op string) *tag.Tag { return &tag.StoreOperationGetTimerIndexTasks case "ExecutionManager.CompleteTimerTask": return &tag.StoreOperationCompleteTimerTask + case "ExecutionManager.DeleteTimerTask": + return &tag.StoreOperationDeleteTimerTask case "ExecutionManager.CreateFailoverMarkerTasks": return &tag.StoreOperationCreateFailoverMarkerTasks case "ExecutionManager.GetActiveClusterSelectionPolicy": diff --git a/common/persistence/wrappers/metered/execution_generated.go b/common/persistence/wrappers/metered/execution_generated.go index 3b2e8d94f2a..52ae65f7952 100644 --- a/common/persistence/wrappers/metered/execution_generated.go +++ b/common/persistence/wrappers/metered/execution_generated.go @@ -222,6 +222,30 @@ func (c *meteredExecutionManager) DeleteReplicationTaskFromDLQ(ctx context.Conte return } +func (c *meteredExecutionManager) DeleteTimerTask(ctx context.Context, request *persistence.DeleteTimerTaskRequest) (err error) { + op := func() error 
{ + err = c.wrapped.DeleteTimerTask(ctx, request) + return err + } + + retryCount := getRetryCountFromContext(ctx) + if domainName, hasDomainName := getDomainNameFromRequest(request); hasDomainName { + logTags := append([]tag.Tag{tag.WorkflowDomainName(domainName)}, getCustomLogTags(request)...) + c.logger.SampleInfo("Persistence DeleteTimerTask called", c.sampleLoggingRate(), logTags...) + if c.enableShardIDMetrics() { + err = c.callWithDomainAndShardScope(metrics.PersistenceDeleteTimerTaskScope, op, metrics.DomainTag(domainName), + metrics.ShardIDTag(c.GetShardID()), metrics.IsRetryTag(retryCount > 0)) + } else { + err = c.call(metrics.PersistenceDeleteTimerTaskScope, op, metrics.DomainTag(domainName), metrics.IsRetryTag(retryCount > 0)) + } + return + } + + err = c.callWithoutDomainTag(metrics.PersistenceDeleteTimerTaskScope, op, append(getCustomMetricTags(request), metrics.IsRetryTag(retryCount > 0))...) + + return +} + func (c *meteredExecutionManager) DeleteWorkflowExecution(ctx context.Context, request *persistence.DeleteWorkflowExecutionRequest) (err error) { op := func() error { err = c.wrapped.DeleteWorkflowExecution(ctx, request) diff --git a/common/persistence/wrappers/metered/metered_test.go b/common/persistence/wrappers/metered/metered_test.go index 4aa1adfaf1d..ecd48e4bee5 100644 --- a/common/persistence/wrappers/metered/metered_test.go +++ b/common/persistence/wrappers/metered/metered_test.go @@ -277,6 +277,7 @@ func prepareMockForTest(t *testing.T, input interface{}, expectedErr error) { mocked.EXPECT().RangeDeleteReplicationTaskFromDLQ(gomock.Any(), gomock.Any()).Return(&persistence.RangeDeleteReplicationTaskFromDLQResponse{}, expectedErr).Times(1) mocked.EXPECT().GetActiveClusterSelectionPolicy(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.ActiveClusterSelectionPolicy{}, expectedErr).Times(1) mocked.EXPECT().DeleteActiveClusterSelectionPolicy(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedErr).Times(1) + mocked.EXPECT().DeleteTimerTask(gomock.Any(), gomock.Any()).Return(expectedErr).Times(1) default: t.Errorf("unsupported type %v", reflect.TypeOf(input)) t.FailNow() diff --git a/common/persistence/wrappers/ratelimited/execution_generated.go b/common/persistence/wrappers/ratelimited/execution_generated.go index 1c4c2c806a9..b9908ad2000 100644 --- a/common/persistence/wrappers/ratelimited/execution_generated.go +++ b/common/persistence/wrappers/ratelimited/execution_generated.go @@ -125,6 +125,18 @@ func (c *ratelimitedExecutionManager) DeleteReplicationTaskFromDLQ(ctx context.C return c.wrapped.DeleteReplicationTaskFromDLQ(ctx, request) } +func (c *ratelimitedExecutionManager) DeleteTimerTask(ctx context.Context, request *persistence.DeleteTimerTaskRequest) (err error) { + if c.metricsClient != nil { + scope := c.metricsClient.Scope(metrics.PersistenceCreateShardScope, metrics.DatastoreTag(c.datastoreName)) + scope.UpdateGauge(metrics.PersistenceQuota, float64(c.rateLimiter.Limit())) + } + if ok := c.rateLimiter.Allow(); !ok { + err = ErrPersistenceLimitExceeded + return + } + return c.wrapped.DeleteTimerTask(ctx, request) +} + func (c *ratelimitedExecutionManager) DeleteWorkflowExecution(ctx context.Context, request *persistence.DeleteWorkflowExecutionRequest) (err error) { if c.metricsClient != nil { scope := c.metricsClient.Scope(metrics.PersistenceCreateShardScope, metrics.DatastoreTag(c.datastoreName)) diff --git a/common/persistence/wrappers/ratelimited/wrappers_test.go 
b/common/persistence/wrappers/ratelimited/wrappers_test.go index ef604c17432..5c15c8038d9 100644 --- a/common/persistence/wrappers/ratelimited/wrappers_test.go +++ b/common/persistence/wrappers/ratelimited/wrappers_test.go @@ -277,6 +277,7 @@ func builderForPassThrough(t *testing.T, injector any, limiter quotas.Limiter, e mocked.EXPECT().RangeDeleteReplicationTaskFromDLQ(gomock.Any(), gomock.Any()).Return(&persistence.RangeDeleteReplicationTaskFromDLQResponse{}, expectedErr) mocked.EXPECT().GetActiveClusterSelectionPolicy(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.ActiveClusterSelectionPolicy{}, expectedErr) mocked.EXPECT().DeleteActiveClusterSelectionPolicy(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedErr) + mocked.EXPECT().DeleteTimerTask(gomock.Any(), gomock.Any()).Return(expectedErr) } default: t.Errorf("unsupported type %v", reflect.TypeOf(injector)) diff --git a/config/dynamicconfig/development.yaml b/config/dynamicconfig/development.yaml index 1766e763606..f84148ea7d0 100644 --- a/config/dynamicconfig/development.yaml +++ b/config/dynamicconfig/development.yaml @@ -93,3 +93,8 @@ shardDistributor.migrationMode: - value: "distributed_pass" constraints: namespace: "test-external-assignment" +system.workflowDeletionJitterRange: +- value: 1 + constraints: {} +system.enableExecutionInfoTracking: +- value: true \ No newline at end of file diff --git a/go.mod b/go.mod index 58e9a0a76a1..ac5f1573968 100644 --- a/go.mod +++ b/go.mod @@ -174,3 +174,5 @@ replace github.com/apache/thrift => github.com/apache/thrift v0.0.0-201612212036 // DO NOT USE as it misses mysql/config store fix retract v1.2.3 + +replace github.com/uber/cadence-idl => github.com/davidporter-id-au/cadence-idl v0.0.0-20260126051300-ab86e9511e83 diff --git a/go.sum b/go.sum index 0907abc4953..8d7795f6063 100644 --- a/go.sum +++ b/go.sum @@ -72,6 +72,8 @@ github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWE github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davidporter-id-au/cadence-idl v0.0.0-20260126051300-ab86e9511e83 h1:2jD2y1qO2DrI1jJ8r9Tbf/eTcHvc+O7iicwZFe3sSpc= +github.com/davidporter-id-au/cadence-idl v0.0.0-20260126051300-ab86e9511e83/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -452,9 +454,6 @@ github.com/uber-go/mapdecode v1.0.0/go.mod h1:b5nP15FwXTgpjTjeA9A2uTHXV5UJCl4arw github.com/uber-go/tally v3.3.12+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= github.com/uber-go/tally v3.3.15+incompatible h1:9hLSgNBP28CjIaDmAuRTq9qV+UZY+9PcvAkXO4nNMwg= github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU= -github.com/uber/cadence-idl v0.0.0-20211111101836-d6b70b60eb8c/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= -github.com/uber/cadence-idl v0.0.0-20260115163036-f68403083e26 h1:ayljsfgiQNLzoA1Bn29LyRPhFdU9gPKq/1zCUjC8cHE= -github.com/uber/cadence-idl v0.0.0-20260115163036-f68403083e26/go.mod 
h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= github.com/uber/jaeger-client-go v2.22.1+incompatible h1:NHcubEkVbahf9t3p75TOCR83gdUHXjRJvjoBh1yACsM= github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= diff --git a/go.work.sum b/go.work.sum index 61f1ab4ee38..217678a4f62 100644 --- a/go.work.sum +++ b/go.work.sum @@ -365,6 +365,8 @@ github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e h1:l99YKCdrK4Lvb/z github.com/dave/jennifer v1.2.0 h1:S15ZkFMRoJ36mGAQgWL1tnr0NQJh9rZ8qatseX/VbBc= github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e h1:xURkGi4RydhyaYR6PzcyHTueQudxY4LgxN1oYEPJHa0= github.com/dave/rebecca v0.9.1 h1:jxVfdOxRirbXL28vXMvUvJ1in3djwkVKXCq339qhBL0= +github.com/davidporter-id-au/cadence-idl v0.0.0-20260126051300-ab86e9511e83 h1:2jD2y1qO2DrI1jJ8r9Tbf/eTcHvc+O7iicwZFe3sSpc= +github.com/davidporter-id-au/cadence-idl v0.0.0-20260126051300-ab86e9511e83/go.mod h1:oyUK7GCNCRHCCyWyzifSzXpVrRYVBbAMHAzF5dXiKws= github.com/dchest/siphash v1.2.3 h1:QXwFc8cFOR2dSa/gE6o/HokBMWtLUaNDVd+22aKHeEA= github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc= github.com/denis-tingajkin/go-header v0.4.2 h1:jEeSF4sdv8/3cT/WY8AgDHUoItNSoEZ7qg9dX7pc218= diff --git a/idls b/idls index f68403083e2..ab86e9511e8 160000 --- a/idls +++ b/idls @@ -1 +1 @@ -Subproject commit f68403083e269a5d3fa5300c40072bb06545248f +Subproject commit ab86e9511e8361dbb2bd7bb418a7138c3c2d94b4 diff --git a/schema/cassandra/cadence/schema.cql b/schema/cassandra/cadence/schema.cql index 3575a90a14f..0cd7ab17944 100644 --- a/schema/cassandra/cadence/schema.cql +++ b/schema/cassandra/cadence/schema.cql @@ -375,6 +375,8 @@ CREATE TABLE executions ( range_id bigint, -- Increasing sequence identifier for transfer queue, checkpointed into shard info activity_map map>, timer_map map>, + workflow_timer_tasks blob, + workflow_timer_tasks_encoding text, child_executions_map map>, request_cancel_map map>, signal_map map>, diff --git a/schema/cassandra/cadence/versioned/v0.45/manifest.json b/schema/cassandra/cadence/versioned/v0.45/manifest.json new file mode 100644 index 00000000000..d92941ed330 --- /dev/null +++ b/schema/cassandra/cadence/versioned/v0.45/manifest.json @@ -0,0 +1,8 @@ +{ + "CurrVersion": "0.45", + "MinCompatibleVersion": "0.45", + "Description": "Adding workflow_timer_task_info type and workflow_timer_task_map column to track workflow-level timer tasks for cleanup during workflow deletion, and add timer_task_id to timer_info type for user timer cleanup", + "SchemaUpdateCqlFiles": [ + "workflow_timer_task_info.cql" + ] +} diff --git a/schema/cassandra/cadence/versioned/v0.45/workflow_timer_task_info.cql b/schema/cassandra/cadence/versioned/v0.45/workflow_timer_task_info.cql new file mode 100644 index 00000000000..a8a38ffd491 --- /dev/null +++ b/schema/cassandra/cadence/versioned/v0.45/workflow_timer_task_info.cql @@ -0,0 +1,3 @@ +-- Add blob-encoded workflow timer tasks fields +ALTER TABLE executions ADD workflow_timer_tasks blob; +ALTER TABLE executions ADD workflow_timer_tasks_encoding text; diff --git a/schema/cassandra/version.go b/schema/cassandra/version.go index ca1ea5418fb..ad289465f51 100644 --- a/schema/cassandra/version.go +++ b/schema/cassandra/version.go @@ -23,7 +23,7 @@ package cassandra // NOTE: whenever there is a new data base schema update, plz update the following versions // Version is the Cassandra database release 
version -const Version = "0.44" +const Version = "0.45" // VisibilityVersion is the Cassandra visibility database release version const VisibilityVersion = "0.10" diff --git a/service/history/config/config.go b/service/history/config/config.go index b005028bbf3..41a9434e47f 100644 --- a/service/history/config/config.go +++ b/service/history/config/config.go @@ -60,6 +60,7 @@ type Config struct { ShutdownDrainDuration dynamicproperties.DurationPropertyFn WorkflowDeletionJitterRange dynamicproperties.IntPropertyFnWithDomainFilter DeleteHistoryEventContextTimeout dynamicproperties.IntPropertyFn + EnableExecutionInfoTracking dynamicproperties.BoolPropertyFn MaxResponseSize int // HistoryCache settings @@ -294,6 +295,8 @@ type Config struct { EnableConsistentQueryByDomain dynamicproperties.BoolPropertyFnWithDomainFilter MaxBufferedQueryCount dynamicproperties.IntPropertyFn + TaskCleanupTimeoutThreshold dynamicproperties.DurationPropertyFn + // EnableContextHeaderInVisibility whether to enable indexing context header in visibility EnableContextHeaderInVisibility dynamicproperties.BoolPropertyFnWithDomainFilter @@ -394,6 +397,7 @@ func New(dc *dynamicconfig.Collection, numberOfShards int, maxMessageSize int, i StandbyTaskMissingEventsResendDelay: dc.GetDurationProperty(dynamicproperties.StandbyTaskMissingEventsResendDelay), StandbyTaskMissingEventsDiscardDelay: dc.GetDurationProperty(dynamicproperties.StandbyTaskMissingEventsDiscardDelay), WorkflowDeletionJitterRange: dc.GetIntPropertyFilteredByDomain(dynamicproperties.WorkflowDeletionJitterRange), + EnableExecutionInfoTracking: dc.GetBoolProperty(dynamicproperties.EnableExecutionInfoTracking), DeleteHistoryEventContextTimeout: dc.GetIntProperty(dynamicproperties.DeleteHistoryEventContextTimeout), MaxResponseSize: maxMessageSize, @@ -567,6 +571,7 @@ func New(dc *dynamicconfig.Collection, numberOfShards int, maxMessageSize int, i EnableConsistentQuery: dc.GetBoolProperty(dynamicproperties.EnableConsistentQuery), EnableConsistentQueryByDomain: dc.GetBoolPropertyFilteredByDomain(dynamicproperties.EnableConsistentQueryByDomain), + TaskCleanupTimeoutThreshold: dc.GetDurationProperty(dynamicproperties.TaskCleanupTimeoutThreshold), EnableContextHeaderInVisibility: dc.GetBoolPropertyFilteredByDomain(dynamicproperties.EnableContextHeaderInVisibility), EnableCrossClusterOperationsForDomain: dc.GetBoolPropertyFilteredByDomain(dynamicproperties.EnableCrossClusterOperationsForDomain), MaxBufferedQueryCount: dc.GetIntProperty(dynamicproperties.MaxBufferedQueryCount), diff --git a/service/history/config/config_test.go b/service/history/config/config_test.go index 0e99b7cd71f..8f424fa8689 100644 --- a/service/history/config/config_test.go +++ b/service/history/config/config_test.go @@ -75,6 +75,7 @@ func TestNewConfig(t *testing.T) { "ShutdownDrainDuration": {dynamicproperties.HistoryShutdownDrainDuration, time.Second}, "WorkflowDeletionJitterRange": {dynamicproperties.WorkflowDeletionJitterRange, 20}, "DeleteHistoryEventContextTimeout": {dynamicproperties.DeleteHistoryEventContextTimeout, 21}, + "EnableExecutionInfoTracking": {dynamicproperties.EnableExecutionInfoTracking, true}, "MaxResponseSize": {nil, maxMessageSize}, "HistoryCacheInitialSize": {dynamicproperties.HistoryCacheInitialSize, 22}, "HistoryCacheMaxSize": {dynamicproperties.HistoryCacheMaxSize, 23}, @@ -233,6 +234,7 @@ func TestNewConfig(t *testing.T) { "WorkflowIDInternalRPS": {dynamicproperties.WorkflowIDInternalRPS, 88}, "EnableConsistentQuery": {dynamicproperties.EnableConsistentQuery, true}, 
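// Sketch of intended usage (assumed, not taken from a specific hunk): the new
// dynamic config knob is exposed as a BoolPropertyFn on the history Config, so
// call sites gate on it per invocation; cfg stands in for *config.Config here.
if cfg.EnableExecutionInfoTracking() {
	// track workflow-level timer tasks in mutable state so they can be removed
	// best-effort when the workflow itself is deleted
}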
"EnableConsistentQueryByDomain": {dynamicproperties.EnableConsistentQueryByDomain, true}, + "TaskCleanupTimeoutThreshold": {dynamicproperties.TaskCleanupTimeoutThreshold, time.Second}, "MaxBufferedQueryCount": {dynamicproperties.MaxBufferedQueryCount, 89}, "EnableContextHeaderInVisibility": {dynamicproperties.EnableContextHeaderInVisibility, true}, "EnableCrossClusterOperationsForDomain": {dynamicproperties.EnableCrossClusterOperationsForDomain, true}, diff --git a/service/history/execution/mutable_state.go b/service/history/execution/mutable_state.go index 1d44a409285..42356ed12ea 100644 --- a/service/history/execution/mutable_state.go +++ b/service/history/execution/mutable_state.go @@ -143,6 +143,7 @@ type ( GetPendingChildExecutionInfos() map[int64]*persistence.ChildExecutionInfo GetPendingRequestCancelExternalInfos() map[int64]*persistence.RequestCancelInfo GetPendingSignalExternalInfos() map[int64]*persistence.SignalInfo + GetPendingWorkflowTimerTaskInfos() map[int]*persistence.WorkflowTimerTaskInfo GetRequestCancelInfo(int64) (*persistence.RequestCancelInfo, bool) GetRetryBackoffDuration(errReason string) time.Duration GetCronBackoffDuration(context.Context) (time.Duration, error) diff --git a/service/history/execution/mutable_state_builder.go b/service/history/execution/mutable_state_builder.go index e5e2514cc50..f32dc6d146c 100644 --- a/service/history/execution/mutable_state_builder.go +++ b/service/history/execution/mutable_state_builder.go @@ -111,6 +111,8 @@ type ( updateSignalRequestedIDs map[string]struct{} // Set of signaled requestIds since last update deleteSignalRequestedIDs map[string]struct{} // Deleted signaled requestIds + workflowTimerTaskInfos []*persistence.WorkflowTimerTaskInfo // Workflow timer task infos loaded from persistence + bufferedEvents []*types.HistoryEvent // buffered history events that are already persisted updateBufferedEvents []*types.HistoryEvent // buffered history events that needs to be persisted clearBufferedEvents bool // delete buffered events from persistence @@ -322,6 +324,7 @@ func (e *mutableStateBuilder) CopyToPersistence() *persistence.WorkflowMutableSt state.Checksum = e.checksum state.ReplicationState = e.replicationState state.ExecutionStats = e.executionStats + state.WorkflowTimerTaskInfos = e.workflowTimerTaskInfos return state } @@ -343,6 +346,7 @@ func (e *mutableStateBuilder) Load( e.pendingRequestCancelInfoIDs = state.RequestCancelInfos e.pendingSignalInfoIDs = state.SignalInfos e.pendingSignalRequestedIDs = state.SignalRequestedIDs + e.workflowTimerTaskInfos = state.WorkflowTimerTaskInfos e.executionInfo = state.ExecutionInfo e.bufferedEvents = e.reorderAndFilterDuplicateEvents(state.BufferedEvents, "load") diff --git a/service/history/execution/mutable_state_builder_methods_timer.go b/service/history/execution/mutable_state_builder_methods_timer.go index 1792534955d..fb6e34cd6d2 100644 --- a/service/history/execution/mutable_state_builder_methods_timer.go +++ b/service/history/execution/mutable_state_builder_methods_timer.go @@ -282,3 +282,11 @@ func checkAndClearTimerFiredEvent( timerEvent := events[timerFiredIdx] return append(events[:timerFiredIdx], events[timerFiredIdx+1:]...), timerEvent } + +func (e *mutableStateBuilder) GetPendingWorkflowTimerTaskInfos() map[int]*persistence.WorkflowTimerTaskInfo { + result := make(map[int]*persistence.WorkflowTimerTaskInfo) + for _, info := range e.workflowTimerTaskInfos { + result[info.TimeoutType] = info + } + return result +} diff --git 
a/service/history/execution/mutable_state_builder_test.go b/service/history/execution/mutable_state_builder_test.go index 40419b6ec3f..293e8e946f6 100644 --- a/service/history/execution/mutable_state_builder_test.go +++ b/service/history/execution/mutable_state_builder_test.go @@ -3500,6 +3500,7 @@ func TestCloseTransactionAsMutation(t *testing.T) { DeleteActivityInfos: []int64{}, UpsertTimerInfos: []*persistence.TimerInfo{}, DeleteTimerInfos: []string{}, + WorkflowTimerTasks: nil, UpsertChildExecutionInfos: []*persistence.ChildExecutionInfo{}, UpsertRequestCancelInfos: []*persistence.RequestCancelInfo{}, DeleteRequestCancelInfos: []int64{}, @@ -3583,6 +3584,7 @@ func TestCloseTransactionAsMutation(t *testing.T) { DeleteActivityInfos: []int64{}, UpsertTimerInfos: []*persistence.TimerInfo{}, DeleteTimerInfos: []string{}, + WorkflowTimerTasks: nil, UpsertChildExecutionInfos: []*persistence.ChildExecutionInfo{}, UpsertRequestCancelInfos: []*persistence.RequestCancelInfo{}, DeleteRequestCancelInfos: []int64{}, diff --git a/service/history/execution/mutable_state_mock.go b/service/history/execution/mutable_state_mock.go index d42ad657e14..a5c54d6495b 100644 --- a/service/history/execution/mutable_state_mock.go +++ b/service/history/execution/mutable_state_mock.go @@ -1384,6 +1384,20 @@ func (mr *MockMutableStateMockRecorder) GetPendingTimerInfos() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingTimerInfos", reflect.TypeOf((*MockMutableState)(nil).GetPendingTimerInfos)) } +// GetPendingWorkflowTimerTaskInfos mocks base method. +func (m *MockMutableState) GetPendingWorkflowTimerTaskInfos() map[int]*persistence.WorkflowTimerTaskInfo { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPendingWorkflowTimerTaskInfos") + ret0, _ := ret[0].(map[int]*persistence.WorkflowTimerTaskInfo) + return ret0 +} + +// GetPendingWorkflowTimerTaskInfos indicates an expected call of GetPendingWorkflowTimerTaskInfos. +func (mr *MockMutableStateMockRecorder) GetPendingWorkflowTimerTaskInfos() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingWorkflowTimerTaskInfos", reflect.TypeOf((*MockMutableState)(nil).GetPendingWorkflowTimerTaskInfos)) +} + // GetPreviousStartedEventID mocks base method. 
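// Illustrative sketch only: because the accessor above keys its result by
// TimeoutType, at most one tracked task per timeout type is surfaced (a later
// entry with the same timeout type overwrites an earlier one). someTime is an
// assumed fixture value.
loaded := []*persistence.WorkflowTimerTaskInfo{
	{TaskID: 10, TimeoutType: 0, VisibilityTimestamp: someTime},
	{TaskID: 11, TimeoutType: 2, VisibilityTimestamp: someTime},
}
// After Load() of a state carrying these infos, GetPendingWorkflowTimerTaskInfos()
// returns map[int]*persistence.WorkflowTimerTaskInfo{0: loaded[0], 2: loaded[1]}.
_ = loaded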
func (m *MockMutableState) GetPreviousStartedEventID() int64 { m.ctrl.T.Helper() diff --git a/service/history/task/timer_task_executor_base.go b/service/history/task/timer_task_executor_base.go index 257c544315c..7f4afa80645 100644 --- a/service/history/task/timer_task_executor_base.go +++ b/service/history/task/timer_task_executor_base.go @@ -22,6 +22,7 @@ package task import ( "context" + "errors" "github.com/uber/cadence/common" "github.com/uber/cadence/common/backoff" @@ -85,7 +86,6 @@ func (t *timerTaskExecutorBase) executeDeleteHistoryEventTask( ctx context.Context, task *persistence.DeleteHistoryEventTask, ) (retError error) { - wfContext, release, err := t.executionCache.GetOrCreateWorkflowExecutionWithTimeout( task.DomainID, getWorkflowExecution(task), @@ -152,7 +152,6 @@ func (t *timerTaskExecutorBase) deleteWorkflow( context execution.Context, msBuilder execution.MutableState, ) error { - if err := t.deleteWorkflowHistory(ctx, task, msBuilder); err != nil { return err } @@ -169,6 +168,8 @@ func (t *timerTaskExecutorBase) deleteWorkflow( return err } + t.deleteWorkflowTimerTasksBestEffort(ctx, task, msBuilder) + // it must be the last one due to the nature of workflow execution deletion if err := t.deleteWorkflowExecution(ctx, task); err != nil { return err @@ -244,9 +245,13 @@ func (t *timerTaskExecutorBase) archiveWorkflow( if err := t.deleteCurrentWorkflowExecution(ctx, task); err != nil { return err } + + t.deleteWorkflowTimerTasksBestEffort(ctx, task, msBuilder) + if err := t.deleteWorkflowExecution(ctx, task); err != nil { return err } + // calling clear here to force accesses of mutable state to read database // if this is not called then callers will get mutable state even though its been removed from database workflowContext.Clear() @@ -321,7 +326,6 @@ func (t *timerTaskExecutorBase) deleteWorkflowHistory( ShardID: common.IntPtr(t.shard.GetShardID()), DomainName: domainName, }) - } return t.throttleRetry.Do(ctx, op) } @@ -349,6 +353,44 @@ func (t *timerTaskExecutorBase) deleteWorkflowVisibility( return t.throttleRetry.Do(ctx, op) } +func (t *timerTaskExecutorBase) deleteWorkflowTimerTasksBestEffort( + ctx context.Context, + task *persistence.DeleteHistoryEventTask, + msBuilder execution.MutableState, +) { + + if !t.shard.GetConfig().EnableExecutionInfoTracking() { + // feature-flag: to remove once this is defaulted to true + // there's nothing to run if the data's not tracked, but just out of caution, bail out if not enabled + return + } + workflowTimerTasks := msBuilder.GetPendingWorkflowTimerTaskInfos() + + for _, taskInfo := range workflowTimerTasks { + op := func(ctx context.Context) error { + + return t.shard.GetExecutionManager().DeleteTimerTask(ctx, &persistence.DeleteTimerTaskRequest{ + TaskID: taskInfo.TaskID, + VisibilityTimestamp: taskInfo.VisibilityTimestamp, + }) + } + if err := t.throttleRetry.Do(ctx, op); err != nil { + if errors.As(err, new(*types.EntityNotExistsError)) { + // in perhaps a significant minority of cases, it's likely the timer's already fired + continue + } + t.logger.Error("Failed to delete workflow timer task during workflow deletion", + tag.ShardID(t.shard.GetShardID()), + tag.WorkflowDomainID(task.DomainID), + tag.WorkflowID(task.WorkflowID), + tag.WorkflowRunID(task.RunID), + tag.TaskID(taskInfo.TaskID), + tag.Error(err), + ) + } + } +} + func (t *timerTaskExecutorBase) Stop() { t.logger.Info("Stopping timerTaskExecutorBase") t.cancelFn() diff --git a/tools/common/schema/updatetask_test.go b/tools/common/schema/updatetask_test.go index 
10f62759f73..dab9d38dd91 100644
--- a/tools/common/schema/updatetask_test.go
+++ b/tools/common/schema/updatetask_test.go
@@ -116,7 +116,7 @@ func (s *UpdateTaskTestSuite) TestReadSchemaDirFromEmbeddings() {
 s.NoError(err)
 ans, err := readSchemaDir(fsys, "0.30", "")
 s.NoError(err)
- s.Equal([]string{"v0.31", "v0.32", "v0.33", "v0.34", "v0.35", "v0.36", "v0.37", "v0.38", "v0.39", "v0.40", "v0.41", "v0.42", "v0.43", "v0.44"}, ans)
+ s.Equal([]string{"v0.31", "v0.32", "v0.33", "v0.34", "v0.35", "v0.36", "v0.37", "v0.38", "v0.39", "v0.40", "v0.41", "v0.42", "v0.43", "v0.44", "v0.45"}, ans)
 fsys, err = fs.Sub(cassandra.SchemaFS, "visibility/versioned")
 s.NoError(err)
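// End-to-end sketch (assumed wiring, stitched together from the hunks above,
// not code from this change): on workflow deletion the timer executor walks the
// tracked references and best-effort deletes the underlying timer rows.
// msBuilder, executionManager and ctx are assumed to be in scope.
for _, ref := range msBuilder.GetPendingWorkflowTimerTaskInfos() {
	_ = executionManager.DeleteTimerTask(ctx, &persistence.DeleteTimerTaskRequest{
		TaskID:              ref.TaskID,
		VisibilityTimestamp: ref.VisibilityTimestamp,
	})
	// NoSQL: nosqlExecutionStore.DeleteTimerTask -> db.DeleteWorkflowTimerTask,
	// which completes the Cassandra timer row keyed by (visibility_ts, task_id).
	// SQL: currently a no-op (see the TODO in sql_execution_store.go).
}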