Skip to main content

Type Definitions Reference

The @janhq/core library exports TypeScript types for building type-safe extensions. All types are extracted from the source code in the core/src/types/ directory of the Jan repository.

Model Types

Model

Represents a complete model definition.
type Model = {
  /** The type of the object. Default: "model" */
  object: string
  /** The version of the model */
  version: string
  /** The format of the model */
  format: string
  /** The model download source */
  sources: ModelArtifact[]
  /** The model identifier */
  id: string
  /** The model identifier, modern version of id */
  model?: string
  /** Human-readable name for UI */
  name: string
  /** Unix timestamp when model was created */
  created: number
  /** Model description */
  description: string
  /** Model settings */
  settings: ModelSettingParams
  /** Model runtime parameters */
  parameters: ModelRuntimeParams
  /** Metadata of the model */
  metadata: ModelMetadata
  /** The model engine */
  engine: string
}

ModelInfo

Simplified model information used in requests.
type ModelInfo = {
  id: string
  settings?: ModelSettingParams
  parameters?: ModelRuntimeParams
  engine?: string
}

ModelSettingParams

Configuration settings for a model.
type ModelSettingParams = {
  ctx_len?: number
  ngl?: number
  embedding?: boolean
  n_parallel?: number
  cpu_threads?: number
  prompt_template?: string
  pre_prompt?: string
  system_prompt?: string
  ai_prompt?: string
  user_prompt?: string
  model_path?: string
  llama_model_path?: string
  mmproj?: string
  cont_batching?: boolean
  vision_model?: boolean
  text_model?: boolean
  engine?: boolean
  top_p?: number
  top_k?: number
  min_p?: number
  temperature?: number
  repeat_penalty?: number
  repeat_last_n?: number
  presence_penalty?: number
  frequency_penalty?: number
}

ModelRuntimeParams

Runtime parameters for model inference.
type ModelRuntimeParams = {
  temperature?: number
  max_temperature?: number
  token_limit?: number
  top_k?: number
  top_p?: number
  stream?: boolean
  max_tokens?: number
  stop?: string[]
  frequency_penalty?: number
  presence_penalty?: number
  engine?: string
}

ModelMetadata

Metadata associated with a model.
type ModelMetadata = {
  author: string
  tags: string[]
  size: number
  cover?: string
  default_ctx_len?: number
  default_max_tokens?: number
}

ModelArtifact

Represents a model file artifact.
type ModelArtifact = {
  filename: string
  url: string
}

Message Types

ThreadMessage

Represents a message in a thread.
type ThreadMessage = {
  /** Unique identifier (ULID) */
  id: string
  /** Object name */
  object: string
  /** Thread id (ULID) */
  thread_id: string
  /** Assistant id */
  assistant_id?: string
  /** File attachments */
  attachments?: Array<Attachment> | null
  /** Role of the message author */
  role: ChatCompletionRole
  /** Message content */
  content: ThreadContent[]
  /** Message status */
  status: MessageStatus
  /** Created timestamp (Unix time) */
  created_at: number
  /** Completed timestamp (Unix time) */
  completed_at: number
  /** Additional metadata */
  metadata?: Record<string, unknown>
  /** Message type */
  type?: string
  /** Error code if status is Error */
  error_code?: ErrorCode
  /** Tool call ID */
  tool_call_id?: string
}

MessageRequest

Request object for sending a message.
type MessageRequest = {
  id?: string
  threadId: string
  assistantId?: string
  attachments: Array<Attachment> | null
  messages?: ChatCompletionMessage[]
  model?: ModelInfo
  thread?: Thread
  tools?: MessageTool[]
  engine?: string
  type?: string
}

MessageStatus

Status of a message.
enum MessageStatus {
  /** Message is fully loaded */
  Ready = 'ready',
  /** Message is not fully loaded */
  Pending = 'pending',
  /** Message loaded with error */
  Error = 'error',
  /** Message is cancelled streaming */
  Stopped = 'stopped',
}

ThreadContent

Content within a message.
type ThreadContent = {
  type: ContentType
  // For text and reasoning content
  text?: ContentValue
  // For image content
  image_url?: ImageContentValue
  // For tool call content
  tool_call_id?: string
  tool_name?: string
  input?: unknown
  output?: unknown
}

ContentType

enum ContentType {
  Text = 'text',
  Reasoning = 'reasoning',
  Image = 'image_url',
  ToolCall = 'tool_call',
}

ErrorCode

enum ErrorCode {
  InvalidApiKey = 'invalid_api_key',
  AuthenticationError = 'authentication_error',
  InsufficientQuota = 'insufficient_quota',
  InvalidRequestError = 'invalid_request_error',
  Unknown = 'unknown',
}

Thread Types

Thread

Represents a conversation thread.
type Thread = {
  /** Unique identifier (ULID) */
  id: string
  /** Object name */
  object: string
  /** Thread title */
  title: string
  /** Assistants in thread */
  assistants: ThreadAssistantInfo[]
  /** Created timestamp (Unix time) */
  created: number
  /** Updated timestamp (Unix time) */
  updated: number
  /** Additional metadata */
  metadata?: Record<string, unknown>
}

ThreadAssistantInfo

Assistant information within a thread.
type ThreadAssistantInfo = {
  id: string
  name: string
  model: ModelInfo
  instructions?: string
  tools?: AssistantTool[]
}

ThreadState

State of a thread.
type ThreadState = {
  hasMore: boolean
  waitingForResponse: boolean
  error?: Error
  lastMessage?: string
}

Inference Types

ChatCompletionRole

Role of a message author.
enum ChatCompletionRole {
  System = 'system',
  Assistant = 'assistant',
  User = 'user',
  Tool = 'tool',
}

ChatCompletionMessage

Message format for chat completion.
type ChatCompletionMessage = {
  /** Message contents */
  content?: ChatCompletionMessageContent
  /** Message role */
  role: ChatCompletionRole
  type?: string
  output?: string
  tool_call_id?: string
}

ChatCompletionMessageContent

type ChatCompletionMessageContent =
  | string
  | (ChatCompletionMessageContentText &
      ChatCompletionMessageContentImage &
      ChatCompletionMessageContentDoc)[]

Assistant Types

Assistant

Assistant configuration.
type Assistant = {
  avatar: string
  thread_location: string | undefined
  id: string
  object: string
  created_at: number
  name: string
  description?: string
  model: string
  instructions?: string
  tools?: AssistantTool[]
  file_ids: string[]
  metadata?: Record<string, unknown>
}

AssistantTool

Tool configuration for assistants.
type AssistantTool = {
  type: string
  enabled: boolean
  useTimeWeightedRetriever?: boolean
  settings: any
}

Setting Types

SettingComponentProps

Configuration for extension settings.
type SettingComponentProps = {
  key: string
  title: string
  description: string
  controllerType: ControllerType
  controllerProps:
    | SliderComponentProps
    | CheckboxComponentProps
    | InputComponentProps
    | DropdownComponentProps
  extensionName?: string
  requireModelReload?: boolean
  configType?: ConfigType
  titleKey?: string
  descriptionKey?: string
}

ControllerType

type ControllerType =
  | 'slider'
  | 'checkbox'
  | 'input'
  | 'tag'
  | 'dropdown'

SliderComponentProps

type SliderComponentProps = {
  min: number
  max: number
  step: number
  value: number
}

CheckboxComponentProps

type CheckboxComponentProps = {
  value: boolean
}

InputComponentProps

type InputComponentProps = {
  placeholder: string
  value: string | string[]
  type?: InputType
  textAlign?: 'left' | 'right'
  inputActions?: InputAction[]
}

DropdownComponentProps

type DropdownComponentProps = {
  value: string
  type?: InputType
  options?: DropdownOption[]
  recommended?: string
}

Interfaces

ModelInterface

Interface for model management extensions.
interface ModelInterface {
  /** Downloads a model */
  pullModel(model: string, id?: string, name?: string): Promise<void>
  
  /** Cancels model download */
  cancelModelPull(model: string): Promise<void>
  
  /** Deletes a model */
  deleteModel(model: string): Promise<void>
  
  /** Gets downloaded models */
  getModels(): Promise<Model[]>
  
  /** Updates model metadata */
  updateModel(model: Partial<Model>): Promise<Model>
  
  /** Imports existing model file */
  importModel(
    model: string,
    modelPath: string,
    name?: string,
    optionType?: OptionType
  ): Promise<void>
  
  /** Get model sources */
  getSources(): Promise<ModelSource[]>
  
  /** Add a model source */
  addSource(source: string): Promise<void>
  
  /** Delete a model source */
  deleteSource(source: string): Promise<void>
}

InferenceInterface

Interface for inference extensions.
interface InferenceInterface {
  /** Processes an inference request */
  inference(data: MessageRequest): Promise<ThreadMessage>
}

Usage Example

import {
  Model,
  Thread,
  ThreadMessage,
  MessageRequest,
  MessageStatus,
  ContentType,
  ChatCompletionRole,
} from '@janhq/core'

// Type-safe model definition
const model: Model = {
  object: 'model',
  version: '1.0',
  format: 'gguf',
  sources: [{ filename: 'model.gguf', url: 'https://...' }],
  id: 'my-model',
  name: 'My Model',
  created: Date.now(),
  description: 'A custom model',
  settings: { ctx_len: 2048 },
  parameters: { temperature: 0.7 },
  metadata: {
    author: 'Me',
    tags: ['custom'],
    size: 4096000000,
  },
  engine: 'nitro',
}

// Type-safe message
const message: ThreadMessage = {
  id: 'msg_123',
  object: 'thread.message',
  thread_id: 'thread_456',
  role: ChatCompletionRole.User,
  content: [{ type: ContentType.Text, text: { value: 'Hello', annotations: [] } }],
  status: MessageStatus.Ready,
  created_at: Date.now(),
  completed_at: Date.now(),
}
All types are automatically exported from @janhq/core. Import only what you need for better tree-shaking.

Build docs developers (and LLMs) love