Skip to content

Commit

Permalink
Fixes JSDoc display in IDEs (#3780)
Browse files Browse the repository at this point in the history
Fixes #2542

DOC
Co-authored-by: Yannick Assogba <[email protected]>
  • Loading branch information
archie-swif authored Sep 10, 2020
1 parent f586a82 commit de35d6f
Show file tree
Hide file tree
Showing 254 changed files with 888 additions and 588 deletions.
6 changes: 4 additions & 2 deletions tfjs-backend-wasm/src/backend_wasm.ts
Original file line number Diff line number Diff line change
Expand Up @@ -360,8 +360,9 @@ let customFetch = false;
* @param path wasm file path or url
* @param usePlatformFetch optional boolean to use platform fetch to download
* the wasm file, default to false.
*
* @doc {heading: 'Environment', namespace: 'wasm'}
*/
/** @doc {heading: 'Environment', namespace: 'wasm'} */
export function setWasmPath(path: string, usePlatformFetch = false): void {
deprecationWarn(
'setWasmPath has been deprecated in favor of setWasmPaths and' +
Expand Down Expand Up @@ -398,8 +399,9 @@ export function setWasmPath(path: string, usePlatformFetch = false): void {
* WASM binaries have been renamed.
* @param usePlatformFetch optional boolean to use platform fetch to download
* the wasm file, default to false.
*
* @doc {heading: 'Environment', namespace: 'wasm'}
*/
/** @doc {heading: 'Environment', namespace: 'wasm'} */
export function setWasmPaths(
prefixOrFileMap: string|{[key in WasmBinaryName]?: string},
usePlatformFetch = false): void {
Expand Down
3 changes: 2 additions & 1 deletion tfjs-backend-webgl/src/webgl.ts
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,9 @@ export {gpgpu_util, webgl_util};

/**
* Enforce use of half precision textures if available on the platform.
*
* @doc {heading: 'Environment', namespace: 'webgl'}
*/
/** @doc {heading: 'Environment', namespace: 'webgl'} */
export function forceHalfFloat(): void {
  // Flip the environment flag so the WebGL backend prefers
  // half-precision (F16) textures where the platform supports them.
  const environment = env();
  environment.set('WEBGL_FORCE_F16_TEXTURES', true);
}
24 changes: 15 additions & 9 deletions tfjs-converter/src/executor/graph_model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,9 @@ export const DEFAULT_MODEL_NAME = 'model.json';
* A `tf.GraphModel` can only be created by loading from a model converted from
* a [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) using
* the command line converter tool and loaded via `tf.loadGraphModel`.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
/** @doc {heading: 'Models', subheading: 'Classes'} */
export class GraphModel implements InferenceModel {
private executor: GraphExecutor;
private version = 'n/a';
Expand Down Expand Up @@ -122,8 +123,9 @@ export class GraphModel implements InferenceModel {
/**
* Synchronously construct the in memory weight map and
* compile the inference graph.
*
* @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}
*/
/** @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true} */
loadSync(artifacts:io.ModelArtifacts) {
this.artifacts = artifacts;
const graph = this.artifacts.modelTopology as tensorflow.IGraphDef;
Expand Down Expand Up @@ -184,8 +186,7 @@ export class GraphModel implements InferenceModel {
* @returns A `Promise` of `SaveResult`, which summarizes the result of
* the saving, such as byte sizes of the saved artifacts for the model's
* topology and weight values.
*/
/**
*
* @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true}
*/
async save(handlerOrURL: io.IOHandler|string, config?: io.SaveConfig):
Expand Down Expand Up @@ -246,8 +247,9 @@ export class GraphModel implements InferenceModel {
* @returns Inference result tensors. The output would be single `tf.Tensor`
* if model has single output node, otherwise Tensor[] or NamedTensorMap[]
* will be returned for model with multiple outputs.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
/** @doc {heading: 'Models', subheading: 'Classes'} */
predict(inputs: Tensor|Tensor[]|NamedTensorMap, config?: ModelPredictConfig):
Tensor|Tensor[]|NamedTensorMap {
return this.execute(inputs, this.outputNodes);
Expand Down Expand Up @@ -290,8 +292,9 @@ export class GraphModel implements InferenceModel {
* are provided and there is only one default output, otherwise return a
* tensor array. The order of the tensor array is the same as the outputs
* if provided, otherwise the order of outputNodes attribute of the model.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
/** @doc {heading: 'Models', subheading: 'Classes'} */
execute(inputs: Tensor|Tensor[]|NamedTensorMap, outputs?: string|string[]):
Tensor|Tensor[] {
inputs = this.normalizeInputs(inputs);
Expand All @@ -312,8 +315,9 @@ export class GraphModel implements InferenceModel {
* @returns A Promise of single tensor if provided with a single output or
* no outputs are provided and there is only one default output, otherwise
* return a tensor map.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
/** @doc {heading: 'Models', subheading: 'Classes'} */
async executeAsync(
inputs: Tensor|Tensor[]|NamedTensorMap,
outputs?: string|string[]): Promise<Tensor|Tensor[]> {
Expand All @@ -332,8 +336,9 @@ export class GraphModel implements InferenceModel {

/**
* Releases the memory used by the weight tensors.
*
* @doc {heading: 'Models', subheading: 'Classes'}
*/
/** @doc {heading: 'Models', subheading: 'Classes'} */
dispose() {
  // Releases the memory backing the model's weight tensors by
  // delegating to the owning graph executor.
  const graphExecutor = this.executor;
  graphExecutor.dispose();
}
Expand Down Expand Up @@ -366,8 +371,9 @@ export class GraphModel implements InferenceModel {
* @param modelUrl The url or an `io.IOHandler` that loads the model.
* @param options Options for the HTTP request, which allows to send credentials
* and custom headers.
*
* @doc {heading: 'Models', subheading: 'Loading'}
*/
/** @doc {heading: 'Models', subheading: 'Loading'} */
export async function loadGraphModel(
modelUrl: string|io.IOHandler,
options: io.LoadOptions = {}): Promise<GraphModel> {
Expand Down
10 changes: 6 additions & 4 deletions tfjs-converter/src/operations/custom_op/register.ts
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,9 @@ const CUSTOM_OPS: {[key: string]: OpMapper} = {};
* has the following attributes:
* - attr: A map from attribute name to its value
* - inputs: A list of input tensors
*
* @doc {heading: 'Models', subheading: 'Op Registry'}
*/
/** @doc {heading: 'Models', subheading: 'Op Registry'} */
export function registerOp(name: string, opFunc: OpExecutor) {
const opMapper: OpMapper = {
tfOpName: name,
Expand All @@ -60,9 +61,9 @@ export function registerOp(name: string, opFunc: OpExecutor) {
* Retrieve the OpMapper object for the registered op.
*
* @param name The Tensorflow Op name.
*
* @doc {heading: 'Models', subheading: 'Op Registry'}
*/
/** @doc {heading: 'Models', subheading: 'Op Registry'} */

export function getRegisteredOp(name: string): OpMapper {
  // Look up the mapper registered under this TensorFlow op name;
  // yields undefined when no custom op was registered for it.
  const mapper = CUSTOM_OPS[name];
  return mapper;
}
Expand All @@ -71,8 +72,9 @@ export function getRegisteredOp(name: string): OpMapper {
* Deregister the Op for graph model executor.
*
* @param name The Tensorflow Op name.
*
* @doc {heading: 'Models', subheading: 'Op Registry'}
*/
/** @doc {heading: 'Models', subheading: 'Op Registry'} */
export function deregisterOp(name: string) {
  // Remove the registry entry so the graph executor no longer
  // resolves this op name to a custom executor.
  const registry = CUSTOM_OPS;
  delete registry[name];
}
3 changes: 2 additions & 1 deletion tfjs-core/src/browser_util.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,9 @@ const delayCallback: Function = (() => {
*
* This is simply a sugar method so that users can do the following:
* `await tf.nextFrame();`
*
* @doc {heading: 'Performance', subheading: 'Timing'}
*/
/** @doc {heading: 'Performance', subheading: 'Timing'} */
function nextFrame(): Promise<void> {
  // Wrap the platform-appropriate frame callback in a promise that
  // settles once the next frame fires.
  return new Promise<void>(resolve => {
    delayCallback(() => {
      resolve();
    });
  });
}
Expand Down
6 changes: 4 additions & 2 deletions tfjs-core/src/environment.ts
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,9 @@ export type FlagRegistryEntry = {
* The environment contains evaluated flags as well as the registered platform.
* This is always used as a global singleton and can be retrieved with
* `tf.env()`.
*
* @doc {heading: 'Environment'}
*/
/** @doc {heading: 'Environment'} */
export class Environment {
private flags: Flags = {};
private flagRegistry: {[flagName: string]: FlagRegistryEntry} = {};
Expand Down Expand Up @@ -194,8 +195,9 @@ function parseValue(flagName: string, value: string): FlagValue {
*
* The environment object contains the evaluated feature values as well as the
* active platform.
*
* @doc {heading: 'Environment'}
*/
/** @doc {heading: 'Environment'} */
export function env() {
  // Hand back the module-level environment singleton; callers share
  // one instance of evaluated flags and the active platform.
  const globalEnvironment = ENV;
  return globalEnvironment;
}
Expand Down
48 changes: 32 additions & 16 deletions tfjs-core/src/globals.ts
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,9 @@ import {getTensorsInContainer} from './tensor_util';
/**
* Enables production mode which disables correctness checks in favor of
* performance.
*
* @doc {heading: 'Environment'}
*/
/** @doc {heading: 'Environment'} */
export function enableProdMode(): void {
  // Setting PROD trades correctness checks for performance.
  const environment = env();
  environment.set('PROD', true);
}
Expand All @@ -44,8 +45,9 @@ export function enableProdMode(): void {
* execution as we do not measure download time in the kernel execution time.
*
* See also: `tf.profile`, `tf.memory`.
*
* @doc {heading: 'Environment'}
*/
/** @doc {heading: 'Environment'} */
export function enableDebugMode(): void {
  // Setting DEBUG turns on extra checks and per-op logging.
  const environment = env();
  environment.set('DEBUG', true);
}
Expand All @@ -68,16 +70,18 @@ setDeprecationWarningFn(deprecationWarn);

/**
* Dispose all variables kept in backend engine.
*
* @doc {heading: 'Environment'}
*/
/** @doc {heading: 'Environment'} */
export function disposeVariables(): void {
  // The global engine owns all variables; let it release them.
  const globalEngine = ENGINE;
  globalEngine.disposeVariables();
}

/**
* It returns the global engine that keeps track of all tensors and backends.
*
* @doc {heading: 'Environment'}
*/
/** @doc {heading: 'Environment'} */
export function engine(): Engine {
  // Expose the global engine that tracks all tensors and backends.
  const globalEngine = ENGINE;
  return globalEngine;
}
Expand All @@ -100,8 +104,9 @@ export function engine(): Engine {
* WebGL Properties:
* - `numBytesInGPU`: Number of bytes allocated (undisposed) in the GPU only at
* this time.
*
* @doc {heading: 'Performance', subheading: 'Memory'}
*/
/** @doc {heading: 'Performance', subheading: 'Memory'} */
export function memory(): MemoryInfo {
  // Snapshot the engine's current memory accounting.
  const info = ENGINE.memory();
  return info;
}
Expand Down Expand Up @@ -132,8 +137,9 @@ export function memory(): MemoryInfo {
* k.totalBytesSnapshot)}`);
* ```
*
*
* @doc {heading: 'Performance', subheading: 'Profile'}
*/
/** @doc {heading: 'Performance', subheading: 'Profile'} */
export function profile(f: () => (TensorContainer | Promise<TensorContainer>)):
Promise<ProfileInfo> {
return ENGINE.profile(f);
Expand Down Expand Up @@ -176,8 +182,9 @@ export function profile(f: () => (TensorContainer | Promise<TensorContainer>)):
* If debug mode is on, the timing and the memory usage of the function
* will be tracked and displayed on the console using the provided name.
* @param fn The function to execute.
*
* @doc {heading: 'Performance', subheading: 'Memory'}
*/
/** @doc {heading: 'Performance', subheading: 'Memory'} */
export function tidy<T extends TensorContainer>(
nameOrFn: string|ScopeFn<T>, fn?: ScopeFn<T>): T {
return ENGINE.tidy(nameOrFn, fn);
Expand All @@ -191,8 +198,9 @@ export function tidy<T extends TensorContainer>(
* the object is not a `tf.Tensor` or does not contain `Tensors`, nothing
* happens. In general it is safe to pass any object here, except that
* `Promise`s are not supported.
*
* @doc {heading: 'Performance', subheading: 'Memory'}
*/
/** @doc {heading: 'Performance', subheading: 'Memory'} */
export function dispose(container: TensorContainer) {
const tensors = getTensorsInContainer(container);
tensors.forEach(tensor => tensor.dispose());
Expand Down Expand Up @@ -227,8 +235,9 @@ export function dispose(container: TensorContainer) {
* ```
*
* @param result The tensor to keep from being disposed.
*
* @doc {heading: 'Performance', subheading: 'Memory'}
*/
/** @doc {heading: 'Performance', subheading: 'Memory'} */
export function keep<T extends Tensor>(result: T): T {
  // Ask the engine to exempt this tensor from tidy() cleanup and
  // return it unchanged for chaining.
  const kept = ENGINE.keep(result);
  return kept;
}
Expand All @@ -255,8 +264,9 @@ export function keep<T extends Tensor>(result: T): T {
* ```
*
* @param f The function to execute and time.
*
* @doc {heading: 'Performance', subheading: 'Timing'}
*/
/** @doc {heading: 'Performance', subheading: 'Timing'} */
export function time(f: () => void): Promise<TimingInfo> {
  // Delegate timing to the engine, which knows how to measure
  // kernel and wall time for the active backend.
  const timing = ENGINE.time(f);
  return timing;
}
Expand All @@ -273,8 +283,9 @@ export function time(f: () => void): Promise<TimingInfo> {
* @param backendName The name of the backend. Currently supports
* `'webgl'|'cpu'` in the browser, `'tensorflow'` under node.js
* (requires tfjs-node), and `'wasm'` (requires tfjs-backend-wasm).
*
* @doc {heading: 'Backends'}
*/
/** @doc {heading: 'Backends'} */
export function setBackend(backendName: string): Promise<boolean> {
  // Resolves to whether the engine successfully switched backends.
  const didSet = ENGINE.setBackend(backendName);
  return didSet;
}
Expand All @@ -283,25 +294,28 @@ export function setBackend(backendName: string): Promise<boolean> {
* Returns a promise that resolves when the currently selected backend (or the
* highest priority one) has initialized. Await this promise when you are using
* a backend that has async initialization.
*
* @doc {heading: 'Backends'}
*/
/** @doc {heading: 'Backends'} */
export function ready(): Promise<void> {
  // Forward to the engine's readiness promise, which settles once
  // the selected backend has finished (possibly async) init.
  const readiness = ENGINE.ready();
  return readiness;
}

/**
* Returns the current backend name (cpu, webgl, etc). The backend is
* responsible for creating tensors and executing operations on those tensors.
*
* @doc {heading: 'Backends'}
*/
/** @doc {heading: 'Backends'} */
export function getBackend(): string {
  // Read the currently active backend's name off the engine.
  const {backendName} = ENGINE;
  return backendName;
}

/**
* Removes a backend and the registered factory.
*
* @doc {heading: 'Backends'}
*/
/** @doc {heading: 'Backends'} */
export function removeBackend(name: string): void {
  // Unregister both the backend instance and its factory.
  const globalEngine = ENGINE;
  globalEngine.removeBackend(name);
}
Expand Down Expand Up @@ -336,8 +350,9 @@ export function findBackendFactory(name: string): () =>
* the best backend. Defaults to 1.
* @return False if there is already a registered backend under this name, true
* if not.
*
* @doc {heading: 'Backends'}
*/
/** @doc {heading: 'Backends'} */
export function registerBackend(
name: string, factory: () => KernelBackend | Promise<KernelBackend>,
priority = 1): boolean {
Expand All @@ -349,8 +364,9 @@ export function registerBackend(
* attempt to initialize the best backend. Will throw an error if the highest
* priority backend has async initialization, in which case, you should call
* 'await tf.ready()' before running other code.
*
* @doc {heading: 'Backends'}
*/
/** @doc {heading: 'Backends'} */
export function backend(): KernelBackend {
  // Accessing the getter may trigger initialization of the highest
  // priority backend if none is active yet.
  const activeBackend = ENGINE.backend;
  return activeBackend;
}
Expand Down
Loading

0 comments on commit de35d6f

Please sign in to comment.