
add docs to most functions

Abdur-Rahman Mansoor 2024-08-18 14:02:10 -04:00
parent 18a744742c
commit 70792a391a


@@ -112,6 +112,9 @@ proc service_proc stdcall, ioctl:dword
endp
; Registers the NVMe disk with KolibriOS. This requires that the
; device was successfully initialized by nvme_init; otherwise this
; will have undefined behavior.
proc add_nvme_disk stdcall, pci:dword
push esi
@@ -224,7 +227,7 @@ endp
; Returns the number of namespaces that are active. Note that this
; doesn't mean that if EAX = 5, then namespaces 1-5 will be active.
; This also sets [pci + pcidev.nn] and [pci + pcidev.nsids]
; to appropriate values
; to their appropriate values.
proc determine_active_nsids stdcall, pci:dword
push ebx esi
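
As background for how such a count can be obtained (the driver's exact method may differ): the Identify command with CNS 02h returns the Active Namespace ID list, a page of up to 1024 32-bit NSIDs in increasing order with unused slots zeroed. A minimal C sketch of counting entries in that buffer; count_active_nsids is a hypothetical helper, not the driver's code.

    #include <stdint.h>

    /* Active Namespace ID list (Identify, CNS 02h): up to 1024 NSIDs in
     * increasing order; entries after the last active NSID are zero. */
    static uint32_t count_active_nsids(const uint32_t nsid_list[1024])
    {
        uint32_t n = 0;
        while (n < 1024 && nsid_list[n] != 0)
            n++;
        return n;
    }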
@@ -255,6 +258,15 @@ proc determine_active_nsids stdcall, pci:dword
endp
; Allocates prp_list_ptr and creates a PRP list there. nprps should
; be set to the number of PRPs the caller wants to create.
;
; This function should only be called if the conditions for building
; a PRP list are met (see page 68 of the NVMe 1.4.0 spec).
;
; TODO: Currently the code for building recursive PRP lists is untested.
; If you want to test it, do a read/write with a sector count equivalent
; to more than 4 MiB. Will test in the future.
proc build_prp_list stdcall, nprps:dword, buf:dword, prp_list_ptr:dword
push esi ebx edi
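
For readers unfamiliar with PRP lists: each list entry is simply the 64-bit physical address of one memory page of the data buffer. A hedged C sketch of the flat (non-recursive) case, assuming 4 KiB pages; virt_to_phys and the aligned_alloc call stand in for the kernel services (GetPhysAddr/KernelAlloc) the driver actually uses.

    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SIZE     4096u
    #define PRPS_PER_PAGE (PAGE_SIZE / sizeof(uint64_t))

    /* Stand-in for the kernel's virtual-to-physical translation; a real
     * driver would ask the OS for the physical address here. */
    static uint64_t virt_to_phys(const void *vaddr)
    {
        return (uint64_t)(uintptr_t)vaddr;
    }

    /* Flat case only: one 64-bit physical page address per list entry.
     * Returns the physical address to put in PRP2, or 0 on failure; the
     * caller must free *list_out afterwards. The recursive case (chaining
     * another list through the last entry of a full page) is the part the
     * comment above marks as untested. */
    static uint64_t build_prp_list(const uint8_t *first_page, uint32_t nprps,
                                   void **list_out)
    {
        if (nprps == 0 || nprps > PRPS_PER_PAGE)
            return 0;

        uint64_t *list = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!list)
            return 0;

        for (uint32_t i = 0; i < nprps; i++)
            list[i] = virt_to_phys(first_page + (uint64_t)i * PAGE_SIZE);

        *list_out = list;
        return virt_to_phys(list);
    }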
@@ -340,6 +352,25 @@ proc build_prp_list stdcall, nprps:dword, buf:dword, prp_list_ptr:dword
endp
; Allocates PRP1/PRP2. Note that it is not required to call this function
; unless you're doing reads and writes with an arbitrary buffer that the
; kernel passes to the driver. In most other cases, it's better to just
; allocate a page-aligned buffer.
;
; ns: Pointer to the device's respective namespace struct
;
; prps_ptr: Pointer to at least 2 DWORDs (PRP1 and PRP2, respectively).
; The caller may leave PRP1 uninitialized, but PRP2 must explicitly be
; initialized to 0.
;
; prp_list_ptr: Pointer to 1 DWORD; the caller must initialize this value to 0.
; If a PRP list is allocated, then prp_list_ptr will contain the pointer to
; the PRP list. The caller is required to free the allocated memory afterwards.
;
; buf: Pointer to the buffer
;
; On success, the function will return 1 and the PRPs will be initialized. If an
; error occurs (most likely due to memory allocation), the function returns 0.
proc alloc_dptr stdcall, ns:dword, prps_ptr:dword, numsectors:dword, prp_list_ptr:dword, buf:dword
push ebx esi edi
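
Continuing the sketch above (same PAGE_SIZE, virt_to_phys and build_prp_list helpers), this is roughly the PRP1/PRP2 decision described in the comment, following the data-pointer rules of the NVMe 1.4 spec. It is an illustration of the rules, not the driver's actual control flow.

    /* Fill PRP1/PRP2 for a transfer of 'bytes' starting at 'buf':
     *   - PRP1 always holds the (possibly page-offset) address of the buffer;
     *   - if the transfer fits in one page, PRP2 stays 0;
     *   - if it spans exactly two pages, PRP2 holds the second page address;
     *   - otherwise PRP2 points to a PRP list covering the remaining pages.
     * Returns 1 on success, 0 on allocation failure. */
    static int alloc_dptr(void *buf, uint64_t bytes, uint64_t prp[2],
                          void **prp_list)
    {
        uint64_t first  = virt_to_phys(buf);
        uint64_t offset = first & (PAGE_SIZE - 1);
        uint64_t pages  = (offset + bytes + PAGE_SIZE - 1) / PAGE_SIZE;
        const uint8_t *second_page = (const uint8_t *)buf - offset + PAGE_SIZE;

        prp[0]    = first;
        prp[1]    = 0;
        *prp_list = NULL;

        if (pages <= 1)
            return 1;
        if (pages == 2) {
            prp[1] = virt_to_phys(second_page);
            return 1;
        }
        prp[1] = build_prp_list(second_page, (uint32_t)(pages - 1), prp_list);
        return prp[1] != 0;
    }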
@@ -411,6 +442,7 @@ nvme_read:
nvme_write:
mov edx, NVM_CMD_WRITE
; Reads from/writes to the disk
proc nvme_readwrite stdcall, ns:dword, buf:dword, start_sector:qword, numsectors_ptr:dword
push ebx esi edi
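
For context on what an NVM read/write submission entry contains, here is a hedged C sketch of the 64-byte command layout per the NVMe 1.4 command format; the struct and helper names are illustrative, not the driver's structures, and the PRPs would come from the alloc_dptr step above.

    #include <stdint.h>
    #include <string.h>

    #define NVM_CMD_WRITE 0x01
    #define NVM_CMD_READ  0x02

    /* 64-byte NVMe submission queue entry, laid out as in the spec. */
    struct sqe {
        uint8_t  opcode;
        uint8_t  flags;        /* FUSE / PSDT */
        uint16_t cid;
        uint32_t nsid;
        uint64_t rsvd;
        uint64_t mptr;
        uint64_t prp1;
        uint64_t prp2;
        uint32_t cdw10;        /* starting LBA, low dword */
        uint32_t cdw11;        /* starting LBA, high dword */
        uint32_t cdw12;        /* bits 15:0 = number of blocks, 0-based */
        uint32_t cdw13;
        uint32_t cdw14;
        uint32_t cdw15;
    };

    /* Fill an I/O read/write command for the given namespace. */
    static void build_io_cmd(struct sqe *cmd, uint8_t opcode, uint16_t cid,
                             uint32_t nsid, uint64_t slba, uint16_t nlb_0based,
                             uint64_t prp1, uint64_t prp2)
    {
        memset(cmd, 0, sizeof *cmd);
        cmd->opcode = opcode;
        cmd->cid    = cid;
        cmd->nsid   = nsid;
        cmd->prp1   = prp1;
        cmd->prp2   = prp2;
        cmd->cdw10  = (uint32_t)slba;
        cmd->cdw11  = (uint32_t)(slba >> 32);
        cmd->cdw12  = nlb_0based;
    }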
@@ -472,6 +504,7 @@ proc nvme_readwrite stdcall, ns:dword, buf:dword, start_sector:qword, numsectors
ecx, \
dword [ebx + 8]
; TODO: add non-blocking mechanisms later on
stdcall nvme_poll, [esi + NSINFO.pci]
test eax, eax
jz .fail
@@ -505,6 +538,9 @@ proc nvme_readwrite stdcall, ns:dword, buf:dword, start_sector:qword, numsectors
endp
; Detects NVMe devices on the PCI bus, stores them in
; [p_nvme_devices], and sets [pcidevs_len] to the appropriate
; size based on how many NVMe devices there are.
proc detect_nvme
invoke GetPCIList
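
NVMe controllers are identified on the PCI bus by their class code: base class 01h (mass storage), subclass 08h (non-volatile memory controller), programming interface 02h (NVM Express). A tiny C sketch of that check; the real code walks the list returned by GetPCIList rather than taking raw class bytes as arguments.

    #include <stdint.h>

    /* Class 01h / subclass 08h / prog IF 02h identifies an NVMe function. */
    static int is_nvme_function(uint8_t base_class, uint8_t subclass,
                                uint8_t prog_if)
    {
        return base_class == 0x01 && subclass == 0x08 && prog_if == 0x02;
    }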
@@ -577,6 +613,10 @@ proc detect_nvme
endp
; Returns 1 if the NVMe device is compatible, 0 otherwise. In practice, the driver
; is compatible with (hopefully) most compliant controllers. This also does some
; initialization, due to bad design decisions made early on, but since the code
; works I haven't felt inclined to change it.
proc device_is_compat stdcall, pci:dword
push esi edx ecx
@@ -613,13 +653,12 @@ proc device_is_compat stdcall, pci:dword
endp
; nvme_init: Initializes the NVMe controller
; nvme_init: Initializes the NVMe controller, I/O queues, and namespaces.
proc nvme_init stdcall, pci:dword
push ebx esi edi
mov esi, dword [pci]
; Check the PCI header to see if interrupts are disabled; if so,
; we have to re-enable them
invoke PciRead16, dword [esi + pcidev.bus], dword [esi + pcidev.devfn], PCI_header00.command
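
The check above concerns the Interrupt Disable bit (bit 10) of the PCI command register. A small sketch of clearing it, with the actual register read/write left to the kernel's PciRead16/PciWrite16 calls.

    #include <stdint.h>

    #define PCI_COMMAND_INTX_DISABLE (1u << 10)  /* PCI command register, bit 10 */

    /* Returns the command-register value to write back so that legacy
     * (INTx) interrupts are no longer masked by the PCI function. */
    static uint16_t command_with_intx_enabled(uint16_t pci_command)
    {
        return pci_command & (uint16_t)~PCI_COMMAND_INTX_DISABLE;
    }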
@@ -898,6 +937,7 @@ proc nvme_init stdcall, pci:dword
jz .exit_fail
;DEBUGF DBG_INFO, "nvme%u: Successfully created I/O submission queue 1\n", [edi + pcidev.num]
; TODO: This only registers a single namespace. Add support for more
stdcall determine_active_nsids, [pci]
test eax, eax
jz .exit_fail ; No active NSIDS
@@ -989,6 +1029,7 @@ proc nvme_init stdcall, pci:dword
endp
; Returns a new CID for queue #y
proc get_new_cid stdcall, pci:dword, y:dword
mov eax, [pci]
@@ -1039,6 +1080,10 @@ proc nvme_enable_ctrl stdcall, pci:dword
endp
; Polls until the device's spinlock is unlocked or the
; "bad timeout" is reached. The lock should be unlocked
; by the interrupt handler once all of the commands have
; been completed.
proc nvme_poll stdcall, pci:dword
push esi
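
A rough C model of the polling just described: spin on the flag the IRQ handler clears, bounded by a maximum number of attempts (the "bad timeout"). The flag and retry count are placeholders for the driver's spinlock field and its timeout constant.

    /* Returns 1 once the IRQ handler has released the lock, 0 on timeout. */
    static int poll_for_completion(volatile const int *locked,
                                   unsigned max_tries)
    {
        for (unsigned i = 0; i < max_tries; i++) {
            if (!*locked)
                return 1;
            /* the real driver may yield or delay here between checks */
        }
        return 0;
    }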
@@ -1070,7 +1115,8 @@ proc nvme_poll stdcall, pci:dword
endp
; Writes to completion queue 'y' head doorbell
; Writes to completion queue 'y' head doorbell. 'cqh' should
; be the new head value that will be stored in the register.
proc cqyhdbl_write stdcall, pci:dword, y:dword, cqh:dword
push esi edi
@@ -1108,7 +1154,8 @@ proc cqyhdbl_write stdcall, pci:dword, y:dword, cqh:dword
endp
; Writes to submission queue 'y' tail doorbell
; Writes to submission queue 'y' tail doorbell. 'cmd' should
; be a pointer to the submission queue struct.
proc sqytdbl_write stdcall, pci:dword, y:word, cmd:dword
push ebx esi edi
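
For reference, the doorbell registers these two procedures write live at fixed offsets derived from CAP.DSTRD (the doorbell stride). A C sketch of the offset math from the NVMe 1.4 spec; bar0 and dstrd stand in for pcidev.io_addr and the controller's cached stride value.

    #include <stdint.h>

    /* Doorbell offsets per the spec:
     *   SQyTDBL = 0x1000 + (2y)     * (4 << CAP.DSTRD)
     *   CQyHDBL = 0x1000 + (2y + 1) * (4 << CAP.DSTRD)
     */
    static volatile uint32_t *
    sq_tail_doorbell(volatile uint8_t *bar0, unsigned y, unsigned dstrd)
    {
        return (volatile uint32_t *)(bar0 + 0x1000 + (2 * y) * (4u << dstrd));
    }

    static volatile uint32_t *
    cq_head_doorbell(volatile uint8_t *bar0, unsigned y, unsigned dstrd)
    {
        return (volatile uint32_t *)(bar0 + 0x1000 + (2 * y + 1) * (4u << dstrd));
    }

    /* cqyhdbl_write: tell the controller which completion entries we consumed. */
    static void write_cq_head(volatile uint8_t *bar0, unsigned y,
                              unsigned dstrd, uint16_t new_head)
    {
        *cq_head_doorbell(bar0, y, dstrd) = new_head;
    }

    /* sqytdbl_write: tell the controller new submission entries are ready. */
    static void write_sq_tail(volatile uint8_t *bar0, unsigned y,
                              unsigned dstrd, uint16_t new_tail)
    {
        *sq_tail_doorbell(bar0, y, dstrd) = new_tail;
    }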
@@ -1190,6 +1237,8 @@ proc is_queue_full stdcall, tail:word, head:word
endp
; Notifies the controller that all the commands of the respective queue
; have been acknowledged as completed (if any).
proc consume_cq_entries stdcall, pci:dword, queue:dword
push esi edi
@@ -1217,6 +1266,18 @@ proc consume_cq_entries stdcall, pci:dword, queue:dword
endp
; Our interrupt handler. Once the controller finishes a command,
; it should generate an interrupt (assuming that no fatal error
; occurred). If an interrupt isn't being generated when it is expected
; to be, check the CSTS register to make sure the error bit isn't
; set; the controller doesn't generate any interrupts in such cases.
;
; Once a command has completed (successfully or not), the controller will
; add a new completion queue entry, and it is the interrupt handler's
; responsibility to write to the appropriate completion queue's head doorbell
; register and update it correctly; otherwise the controller will continue
; to generate interrupts (one of the most common causes of freezes with
; the driver, in my experience).
proc irq_handler
push ebx esi edi
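
To make the head-doorbell responsibility concrete: completion entries carry a Phase Tag bit that the controller inverts on every pass through the queue, so the handler consumes entries while the phase matches and then writes the new head once. A hedged C sketch; the entry layout follows the spec, while matching CIDs to pending commands is elided.

    #include <stdint.h>

    /* 16-byte completion queue entry; bit 0 of 'status' is the Phase Tag. */
    struct cqe {
        uint32_t dw0;
        uint32_t dw1;
        uint16_t sq_head;
        uint16_t sq_id;
        uint16_t cid;
        uint16_t status;
    };

    /* Drain all new completions, then ring the CQ head doorbell exactly once.
     * Failing to update the head doorbell leaves the interrupt asserted,
     * which is the freeze scenario described above. */
    static uint16_t drain_completions(volatile struct cqe *cq, uint16_t qsize,
                                      uint16_t head, uint8_t *phase)
    {
        while ((cq[head].status & 1) == *phase) {
            /* ... match cq[head].cid with the pending command here ... */
            if (++head == qsize) {
                head = 0;
                *phase ^= 1;     /* phase flips on every wrap-around */
            }
        }
        return head;             /* caller writes this to CQyHDBL */
    }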
@@ -1230,6 +1291,8 @@ proc irq_handler
add esi, sizeof.pcidev
inc ecx
cmp ecx, ebx
; TODO: Apply solution given by @punk_joker of checking which device
; generated an interrupt.
ja .not_our_irq
mov edi, dword [esi + pcidev.io_addr]
mov dword [edi + NVME_MMIO.INTMS], 0x3
@@ -1252,6 +1315,13 @@ proc irq_handler
endp
; Deletes the allocated I/O queues for all of the NVMe devices,
; and shuts down all of the controllers. See pages 295-297 of
; the NVMe 1.4.0 spec for details on how shutdown processing
; should occur.
;
; Currently, shutdown still has problems on VMware.
; See: https://git.kolibrios.org/GSoC/kolibrios-nvme-driver/issues/5
proc nvme_cleanup
DEBUGF DBG_INFO, "nvme: Cleaning up...\n"
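
The shutdown processing those spec pages describe boils down to: write CC.SHN = 01b (normal shutdown notification), then wait for CSTS.SHST to read 10b (shutdown processing complete). A hedged C sketch; cc/csts stand in for the mapped NVME_MMIO registers and the timeout handling is simplified.

    #include <stdint.h>

    #define CC_SHN_NORMAL   (1u << 14)   /* CC.SHN = 01b, bits 15:14 */
    #define CC_SHN_MASK     (3u << 14)
    #define CSTS_SHST_MASK  (3u << 2)    /* CSTS.SHST, bits 3:2 */
    #define CSTS_SHST_DONE  (2u << 2)    /* 10b = shutdown complete */

    /* Normal controller shutdown: notify via CC.SHN, then poll CSTS.SHST.
     * Returns 1 when the controller reports shutdown complete, 0 on timeout. */
    static int shutdown_controller(volatile uint32_t *cc,
                                   volatile uint32_t *csts,
                                   unsigned max_tries)
    {
        *cc = (*cc & ~CC_SHN_MASK) | CC_SHN_NORMAL;

        for (unsigned i = 0; i < max_tries; i++) {
            if ((*csts & CSTS_SHST_MASK) == CSTS_SHST_DONE)
                return 1;
        }
        return 0;   /* shutdown did not complete in time */
    }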
@@ -1296,6 +1366,9 @@ proc nvme_cleanup
cmp ebx, dword [pcidevs_len]
jne .get_pcidev
; NOTE: This code has a bug! It only shuts down the last
; controller, not all of them. Move this inside the loop
; and check if the device is actually valid.
; Shutdown the controller
mov edi, dword [esi + pcidev.io_addr]
mov eax, dword [edi + NVME_MMIO.CC]